From cc1ba0d3d7988c214cc65af34fb8aad37894e24b Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Sat, 16 Nov 2024 12:58:38 -0500 Subject: [PATCH 001/141] Update pyproject.toml --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d90f2fb4..bc9e1bf4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,9 +11,9 @@ authors = [ ] description = "Automatically initialize distributed PyTorch environments" readme = "README.md" +license = {file = "LICENSE"} urls = { Repository = "https://github.com/apoorvkh/torchrunx.git", Documentation = "https://torchrunx.readthedocs.io" } classifiers = [ - "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -26,6 +26,7 @@ dependencies = [ "fabric>=3.2", "torch>=2.0", # torch.distributed depends on numpy + # note: torch<=2.2 needs numpy<2 "numpy>=1.20", ] [dependency-groups] From 0c7b533b3811270d5d46cb52c62f0aff325dd656 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 16 Nov 2024 13:50:02 -0500 Subject: [PATCH 002/141] switch to GPL license --- LICENSE | 695 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 674 insertions(+), 21 deletions(-) diff --git a/LICENSE b/LICENSE index 2ce617f8..f288702d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,674 @@ -MIT License - -Copyright (c) 2024 Apoorv Khandelwal (apoorvkh) & Peter Curtin (pmcurtin) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice 
shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
From d5419339a12993896e7ee6cdfc075b2524c9eafd Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 17 Nov 2024 14:37:51 -0500 Subject: [PATCH 003/141] updates to readme --- CONTRIBUTING.md | 6 ++--- README.md | 69 +++++++++++++++++++++++++++---------------------- 2 files changed, 40 insertions(+), 35 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1cf5b2b6..15839e0e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,13 +2,11 @@ We use the [`uv`](https://github.com/astral-sh/uv) package manager. Simply [install `uv`](https://github.com/astral-sh/uv#installation) and run `uv sync` in this repository to build the environment. Run `source .venv/bin/activate` to activate the environment. -We use `ruff check` for linting, `ruff format` for formatting, `pyright` for static type checking, and `pytest` for testing. - -We build wheels with `uv build` and upload to [PyPI](https://pypi.org/project/torchrunx) with `uv publish`. Our release pipeline is powered by Github Actions. +We use `ruff check` for linting, `ruff format` for formatting, `pyright` for static type checking, and `pytest` for testing. We expect all such checks to pass before merging changes to the main branch. We build wheels with `uv build` and upload to [PyPI](https://pypi.org/project/torchrunx) with `uv publish`. Our CI pipelines are powered by Github Actions. ## Pull Requests -Make a pull request with your changes on Github and we'll try to look at soon! If addressing a specific issue, mention it in the PR, and offer a short explanation of your fix. If adding a new feature, explain why it's meaningful and belongs in __torchrunx__. +Make a pull request with your changes on Github and we'll try to look at it soon! If addressing a specific issue, mention it in the PR, and offer a short explanation of your fix. If adding a new feature, explain why it's meaningful and belongs in __torchrunx__. 
## Testing diff --git a/README.md b/README.md index 74f3ce25..ea43e9bc 100644 --- a/README.md +++ b/README.md @@ -7,24 +7,56 @@ [![Docs](https://readthedocs.org/projects/torchrunx/badge/?version=stable)](https://torchrunx.readthedocs.io) [![GitHub License](https://img.shields.io/github/license/apoorvkh/torchrunx)](https://github.com/apoorvkh/torchrunx/blob/main/LICENSE) +**Automatically distribute PyTorch functions onto multiple machines or GPUs** + By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.com/pmcurtin) -**Automatically distribute PyTorch functions onto multiple machines or GPUs** +--- + +`torchrunx` is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate`, `deepspeed`, and so forth). Simply put, you can launch functions in Python like this [(complete examples below)](#demo): + +```python +def my_train_function(): ... + +torchrunx.launch( + my_train_function, + hostnames=["localhost", "other_node"], + workers_per_host=2 # number of GPUs +) +``` + +This library uniquely offers: + +1. **An automatic launcher that just works for everyone.** No system-specific dependencies and orchestration for *automatic* distribution. `torchrunx` is an SSH-based, pure-Python library that is universally easy to install. + +2. **Returned control over the CLI.** Our library permits conventional commands (`python my_script.py ...`), in contrast to launchers that override the `python` executable in a cumbersome way (e.g. `torchrun --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=100.43.331.111 --master_port=1234 my_script.py ...`). Users can define their CLI as they wish and determine exactly which launcher/script arguments they want to expose. + +3. **Support for more complex workflows in a single script.** Your workflow may have independent steps that need different parallelizations (e.g. 
comparing training throughput on 4, then 8 GPUs; training on 8 GPUs, testing on 1 GPU; and so forth). CLI-based launchers naively parallelize the entire script for exactly N GPUs. In contrast, our library treats these steps in a modular way and permits degrees of parallelism in a single script. We clean memory leaks (which are unfortunately common in PyTorch) as we go, so previous steps won't crash or adversely affect future steps. + +4. **Better handling of system failures.** By default, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for at least 10 minutes (the NCCL timeout) or become perpetual zombies. `torchrunx` decouples "launcher" and "worker" processes. If the system kills a worker, our launcher immediately raises a `WorkerFailure` exception, which users can handle as they wish. We always clean up all nodes, so no more zombies! + +5. **Bonus features.** + - Fine-grained, custom handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! + - No need to manually set up a [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) + - We automatically detect and infer settings from SLURM environments. + - Start multi-node training from Python notebooks! + +There's more on our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, etc! 
## Installation +**Requires:** Linux (+ SSH & shared filesystems if using multiple machines) + ```bash pip install torchrunx ``` -**Requires:** Linux (with shared filesystem & SSH access if using multiple machines) +## Working demos -## Demo - -Here's a simple example where we "train" a model on two nodes (with 2 GPUs each). +Here's a simple example where we train a model on two nodes (with 2 GPUs each).
- Training code + Vanilla PyTorch ```python import os @@ -68,28 +100,3 @@ if __name__ == "__main__": ### [Full API](https://torchrunx.readthedocs.io/stable/api.html) ### [Advanced Usage](https://torchrunx.readthedocs.io/stable/advanced.html) - -## Why should I use this? - -Whether you have 1 GPU, 8 GPUs, or 8 machines: - -__Features__ - -- Our [`launch()`](https://torchrunx.readthedocs.io/stable/api.html#torchrunx.launch) utility is super _Pythonic_ - - Return objects from your workers - - Run `python script.py` instead of `torchrun script.py` - - Launch multi-node functions, even from Python Notebooks -- Fine-grained control over logging, environment variables, exception handling, etc. -- Automatic integration with SLURM - -__Robustness__ - -- If you want to run a complex, _modular_ workflow in __one__ script - - don't parallelize your entire script: just the functions you want! - - no worries about memory leaks or OS failures - -__Convenience__ - -- If you don't want to: - - set up [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) yourself - - manually SSH into every machine and `torchrun --master-ip --master-port ...`, babysit failed processes, etc. From 693a6b73e5d056e4c9fda900e443974e7c4d2028 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 17 Nov 2024 15:06:17 -0500 Subject: [PATCH 004/141] placeholders for other libraries --- README.md | 95 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index ea43e9bc..29089782 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.co --- -`torchrunx` is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate`, `deepspeed`, and so forth). 
Simply put, you can launch functions in Python like this [(complete examples below)](#demo): +`torchrunx` is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate`, `deepspeed`, and so forth). Simply put, you can launch functions in Python like this [(complete examples below)](#examples): ```python def my_train_function(): ... @@ -51,38 +51,37 @@ There's more on our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is% pip install torchrunx ``` -## Working demos +## [Full API](https://torchrunx.readthedocs.io/stable/api.html) +## [Advanced Usage](https://torchrunx.readthedocs.io/stable/advanced.html) -Here's a simple example where we train a model on two nodes (with 2 GPUs each). - -
- Vanilla PyTorch - - ```python - import os - import torch - - def train(): - rank = int(os.environ['RANK']) - local_rank = int(os.environ['LOCAL_RANK']) +## Examples - model = torch.nn.Linear(10, 10).to(local_rank) - ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) - optimizer = torch.optim.AdamW(ddp_model.parameters()) - - optimizer.zero_grad() - outputs = ddp_model(torch.randn(5, 10)) - labels = torch.randn(5, 10).to(local_rank) - torch.nn.functional.mse_loss(outputs, labels).backward() - optimizer.step() - - if rank == 0: - return model - ``` +Here's a simple example where we train a model on two nodes (with 2 GPUs each). - You could also use `transformers.Trainer` (or similar) to automatically handle all the multi-GPU / DDP code above. -
+### Vanilla PyTorch +```python +import os +import torch + +def train(): + rank = int(os.environ['RANK']) + local_rank = int(os.environ['LOCAL_RANK']) + + model = torch.nn.Linear(10, 10).to(local_rank) + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) + optimizer = torch.optim.AdamW(ddp_model.parameters()) + + for step in range(10): + optimizer.zero_grad() + outputs = ddp_model(torch.randn(5, 10)) + labels = torch.randn(5, 10).to(local_rank) + torch.nn.functional.mse_loss(outputs, labels).backward() + optimizer.step() + + if rank == 0: + return model +``` ```python import torchrunx as trx @@ -98,5 +97,39 @@ if __name__ == "__main__": torch.save(trained_model.state_dict(), "model.pth") ``` -### [Full API](https://torchrunx.readthedocs.io/stable/api.html) -### [Advanced Usage](https://torchrunx.readthedocs.io/stable/advanced.html) +### With other libraries + +
+ Accelerate + + ```python + ``` +
+ +
+ HF Trainer + + ```python + ``` +
+ +
+ Deepspeed + + ```python + ``` +
+ +
+ PyTorch Lightning + + ```python + ``` +
+ +
+ MosaicML Composer + + ```python + ``` +
From 2aa837dc2bae1b3115c118bb4feadcc0ed62cba0 Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Fri, 22 Nov 2024 00:03:33 -0500 Subject: [PATCH 005/141] Update README.md --- README.md | 96 ++++++++++++++++++++++++------------------------------- 1 file changed, 41 insertions(+), 55 deletions(-) diff --git a/README.md b/README.md index 29089782..c11d8788 100644 --- a/README.md +++ b/README.md @@ -7,25 +7,57 @@ [![Docs](https://readthedocs.org/projects/torchrunx/badge/?version=stable)](https://torchrunx.readthedocs.io) [![GitHub License](https://img.shields.io/github/license/apoorvkh/torchrunx)](https://github.com/apoorvkh/torchrunx/blob/main/LICENSE) -**Automatically distribute PyTorch functions onto multiple machines or GPUs** - By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.com/pmcurtin) --- -`torchrunx` is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate`, `deepspeed`, and so forth). Simply put, you can launch functions in Python like this [(complete examples below)](#examples): +`torchrunx` is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate launch`, `deepspeed`, etc). Simply put, you can launch functions (onto multiple GPUs or machines) in Python like: + +
+ Training code (expand) + +```python +import os +import torch + +def train(): + rank = int(os.environ['RANK']) + local_rank = int(os.environ['LOCAL_RANK']) + + model = torch.nn.Linear(10, 10).to(local_rank) + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) + optimizer = torch.optim.AdamW(ddp_model.parameters()) + + for step in range(10): + optimizer.zero_grad() + outputs = ddp_model(torch.randn(5, 10)) + labels = torch.randn(5, 10).to(local_rank) + torch.nn.functional.mse_loss(outputs, labels).backward() + optimizer.step() + + if rank == 0: + return model +``` +
```python -def my_train_function(): ... +import torchrunx as trx -torchrunx.launch( - my_train_function, +def train(): ... # implemented above + +result = trx.launch( + func=train, hostnames=["localhost", "other_node"], workers_per_host=2 # number of GPUs ) + +trained_model = result.rank(0) +torch.save(trained_model.state_dict(), "model.pth") ``` -This library uniquely offers: +**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** + +**Why?** This library uniquely offers: 1. **An automatic launcher that just works for everyone.** No system-specific dependencies and orchestration for *automatic* distribution. `torchrunx` is an SSH-based, pure-Python library that is universally easy to install. @@ -41,7 +73,7 @@ This library uniquely offers: - We automatically detect and infer settings from SLURM environments. - Start multi-node training from Python notebooks! -There's more on our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, etc! +On our [Roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more! ## Installation @@ -51,53 +83,7 @@ There's more on our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is% pip install torchrunx ``` -## [Full API](https://torchrunx.readthedocs.io/stable/api.html) -## [Advanced Usage](https://torchrunx.readthedocs.io/stable/advanced.html) - -## Examples - -Here's a simple example where we train a model on two nodes (with 2 GPUs each). 
- -### Vanilla PyTorch - -```python -import os -import torch - -def train(): - rank = int(os.environ['RANK']) - local_rank = int(os.environ['LOCAL_RANK']) - - model = torch.nn.Linear(10, 10).to(local_rank) - ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) - optimizer = torch.optim.AdamW(ddp_model.parameters()) - - for step in range(10): - optimizer.zero_grad() - outputs = ddp_model(torch.randn(5, 10)) - labels = torch.randn(5, 10).to(local_rank) - torch.nn.functional.mse_loss(outputs, labels).backward() - optimizer.step() - - if rank == 0: - return model -``` - -```python -import torchrunx as trx - -if __name__ == "__main__": - result = trx.launch( - func=train, - hostnames=["localhost", "other_node"], - workers_per_host=2 # number of GPUs - ) - - trained_model = result.rank(0) - torch.save(trained_model.state_dict(), "model.pth") -``` - -### With other libraries +## Examples with other libraries
Accelerate From cc1546dc7d1a33333fa75a742e81e52329aca57e Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Fri, 22 Nov 2024 00:09:03 -0500 Subject: [PATCH 006/141] Update README.md --- README.md | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c11d8788..4c866bae 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,17 @@ torch.save(trained_model.state_dict(), "model.pth") **Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** -**Why?** This library uniquely offers: +## Installation + +**Requires:** Linux (+ SSH & shared filesystems if using multiple machines) + +```bash +pip install torchrunx +``` + +## Why? + +This library uniquely offers: 1. **An automatic launcher that just works for everyone.** No system-specific dependencies and orchestration for *automatic* distribution. `torchrunx` is an SSH-based, pure-Python library that is universally easy to install. @@ -73,15 +83,7 @@ torch.save(trained_model.state_dict(), "model.pth") - We automatically detect and infer settings from SLURM environments. - Start multi-node training from Python notebooks! -On our [Roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more! - -## Installation - -**Requires:** Linux (+ SSH & shared filesystems if using multiple machines) - -```bash -pip install torchrunx -``` +On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more! 
## Examples with other libraries From d61578a4e73151e5df6a467e80562addea8f4622 Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Fri, 22 Nov 2024 00:09:41 -0500 Subject: [PATCH 007/141] bump version to 0.3.0 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index bc9e1bf4..8598d58c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "torchrunx" -version = "0.2.4" +version = "0.3.0" authors = [ { name = "Apoorv Khandelwal", email = "mail@apoorvkh.com" }, { name = "Peter Curtin", email = "peter_curtin@brown.edu" }, From ea119c8629a32323336d90af1c4f798176a813d1 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Wed, 27 Nov 2024 22:46:19 -0500 Subject: [PATCH 008/141] updates to readme --- README.md | 80 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 51 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 4c866bae..8a470314 100644 --- a/README.md +++ b/README.md @@ -9,18 +9,40 @@ By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.com/pmcurtin) +**The easiest way to run PyTorch on multiple GPUs or machines.** + --- -`torchrunx` is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate launch`, `deepspeed`, etc). Simply put, you can launch functions (onto multiple GPUs or machines) in Python like: +**`torchrunx`** is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate launch`, `deepspeed`, etc). + +Simply put, you can distribute PyTorch functions from Python like: + +```python +def train(): ... 
# implemented below + +import torchrunx as trx + +# Run train(num_steps=10) on 2 machines with 2 GPUs each + +result = trx.launch( + func=train, + func_kwargs=dict(num_steps=10), + hostnames=["localhost", "other_node"], + workers_per_host=2 +) + +trained_model = result.rank(0) +torch.save(trained_model.state_dict(), "model.pth") +```
- Training code (expand) + Training function (expand) ```python import os import torch -def train(): +def train(num_steps: int = 5): rank = int(os.environ['RANK']) local_rank = int(os.environ['LOCAL_RANK']) @@ -40,48 +62,48 @@ def train(): ```
-```python -import torchrunx as trx - -def train(): ... # implemented above - -result = trx.launch( - func=train, - hostnames=["localhost", "other_node"], - workers_per_host=2 # number of GPUs -) - -trained_model = result.rank(0) -torch.save(trained_model.state_dict(), "model.pth") -``` - **Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** ## Installation -**Requires:** Linux (+ SSH & shared filesystems if using multiple machines) - ```bash pip install torchrunx ``` +**Requires:** Linux (+ SSH & shared filesystems if using multiple machines) + ## Why? This library uniquely offers: -1. **An automatic launcher that just works for everyone.** No system-specific dependencies and orchestration for *automatic* distribution. `torchrunx` is an SSH-based, pure-Python library that is universally easy to install. +1. **An automatic launcher that just works for everyone** 🚀 + +No system-specific dependencies and orchestration for *automatic* multi-node distribution. `torchrunx` is an SSH-based, pure-Python library that is universally easy to install. + +2. **Conventional CLI commands** 🖥️ + +Run familiar commands, like `python my_script.py ...`, and customize arguments as you wish. + +In contrast to launchers that override the `python` executable in a cumbersome way (e.g. `torchrun --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=100.43.331.111 --master_port=1234 my_script.py ...`). + +3. **Support for more complex workflows in a single script** 🎛️ + +Your workflow may have independent steps that need different parallelizations (e.g. training on 8 GPUs, testing on 1 GPU; comparing throughput on 4, then 8 GPUs; and so forth). CLI-based launchers naively parallelize the entire script for exactly *N* GPUs. In contrast, our library treats these steps in a modular way and permits *degrees* of parallelism in a single script. 
+ +We clean memory leaks (which are unfortunately common in PyTorch) as we go, so previous steps won't crash or adversely affect future steps. + +4. **Better handling of system failures. No more zombies!** 🧟 -2. **Returned control over the CLI.** Our library permits conventional commands (`python my_script.py ...`), in contrast to launchers that override the `python` executable in a cumbersome way (e.g. `torchrun --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=100.43.331.111 --master_port=1234 my_script.py ...`). Users can define their CLI as they wish and determine exactly which launcher/script arguments they want to expose. +With `torchrun`, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for at least 10 minutes (the NCCL timeout) or become perpetual zombies. -3. **Support for more complex workflows in a single script.** Your workflow may have independent steps that need different parallelizations (e.g. comparing training throughput on 4, then 8 GPUs; training on 8 GPUs, testing on 1 GPU; and so forth). CLI-based launchers naively parallelize the entire script for exactly N GPUs. In contrast, our library treats these steps in a modular way and permits degrees of parallelism in a single script. We clean memory leaks (which are unfortunately common in PyTorch) as we go, so previous steps won't crash or adversely affect future steps. +`torchrunx` decouples "launcher" and "worker" processes. If the system kills a worker, our launcher immediately raises a `WorkerFailure` exception, which users can handle as they wish. We always clean up all nodes, so no more zombies! -4. **Better handling of system failures.** By default, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. 
due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for at least 10 minutes (the NCCL timeout) or become perpetual zombies. `torchrunx` decouples "launcher" and "worker" processes. If the system kills a worker, our launcher immediately raises a `WorkerFailure` exception, which users can handle as they wish. We always clean up all nodes, so no more zombies! +5. **Bonus features** 🎁 -5. **Bonus features.** - - Fine-grained, custom handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! - - No need to manually set up a [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) - - We automatically detect and infer settings from SLURM environments. - - Start multi-node training from Python notebooks! +- Fine-grained, custom handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! +- No need to manually set up a [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) +- We automatically detect and infer settings from SLURM environments. +- Start multi-node training from Python notebooks! On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more! 
From 16bff956501a48f53808d9855934dba6515a3aa2 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 02:14:13 -0500 Subject: [PATCH 009/141] switching docs to markdown --- README.md | 2 +- docs/source/advanced.md | 113 +++++++++++++++++ docs/source/advanced.rst | 114 ------------------ docs/source/{api.rst => api.md} | 11 +- docs/source/conf.py | 3 +- .../{contributing.rst => contributing.md} | 2 + docs/source/how_it_works.md | 11 ++ docs/source/how_it_works.rst | 12 -- docs/source/index.md | 20 +++ docs/source/index.rst | 15 --- 10 files changed, 157 insertions(+), 146 deletions(-) create mode 100644 docs/source/advanced.md delete mode 100644 docs/source/advanced.rst rename docs/source/{api.rst => api.md} (71%) rename docs/source/{contributing.rst => contributing.md} (78%) create mode 100644 docs/source/how_it_works.md delete mode 100644 docs/source/how_it_works.rst create mode 100644 docs/source/index.md delete mode 100644 docs/source/index.rst diff --git a/README.md b/README.md index 8a470314..23423e12 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.co Simply put, you can distribute PyTorch functions from Python like: ```python -def train(): ... # implemented below +def train(num_steps: int): ... # implemented below import torchrunx as trx diff --git a/docs/source/advanced.md b/docs/source/advanced.md new file mode 100644 index 00000000..7a79d0e0 --- /dev/null +++ b/docs/source/advanced.md @@ -0,0 +1,113 @@ +# Advanced Usage + +## Multiple functions in one script + +We could also launch multiple functions (e.g. 
train on many GPUs, test on one GPU): + +```python +import torchrunx as trx + +trained_model = trx.launch( + func=train, + hostnames=["node1", "node2"], + workers_per_host=8 +).rank(0) + +accuracy = trx.launch( + func=test, + func_args=(trained_model,), + hostnames=["localhost"], + workers_per_host=1 +).rank(0) + +print(f'Accuracy: {accuracy}') +``` + +{mod}`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. + +## Launcher class + +We provide the {mod}`torchrunx.Launcher` class as an alias to {mod}`torchrunx.launch`. + +```{eval-rst} +.. autoclass:: torchrunx.Launcher + :members: +``` + +### CLI integration + +We can use {mod}`torchrunx.Launcher` to populate arguments from the CLI (e.g. with [tyro](https://brentyi.github.io/tyro/)): + +```python +import torchrunx as trx +import tyro + +def distributed_function(): + pass + +if __name__ == "__main__": + launcher = tyro.cli(trx.Launcher) + launcher.run(distributed_function) +``` + +`python ... --help` then results in: + +```bash +╭─ options ─────────────────────────────────────────────╮ +│ -h, --help show this help message and exit │ +│ --hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --ssh-config-file {None}|STR|PATH │ +│ (default: None) │ +│ --backend {None,nccl,gloo,mpi,ucc,auto} │ +│ (default: auto) │ +│ --timeout INT (default: 600) │ +│ --default-env-vars [STR [STR ...]] │ +│ (default: PATH LD_LIBRARY ...) │ +│ --extra-env-vars [STR [STR ...]] │ +│ (default: ) │ +│ --env-file {None}|STR|PATH │ +│ (default: None) │ +╰───────────────────────────────────────────────────────╯ +``` + +## SLURM integration + +By default, the `hostnames` or `workers_per_host` arguments are populated from the current SLURM allocation. If no allocation is detected, we assume 1 machine (localhost) with N workers (num. GPUs or CPUs). 
+Raises a `RuntimeError` if `hostnames="slurm"` or `workers_per_host="slurm"` but no allocation is detected. + +## Propagating exceptions + +Exceptions that are raised in workers will be raised by the launcher process. + +A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). + +## Environment variables + +Environment variables in the launcher process that match the `default_env_vars` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using `fnmatch`. + +`default_env_vars` can be overriden if desired. This list can be augmented using `extra_env_vars`. Additional environment variables (and more custom bash logic) can be included via the `env_file` argument. Our agents `source` this file. + +We also set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. + +## Custom logging + +We forward all logs (i.e. from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to files in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`) and are named by timestamp, hostname, and local_rank. + +{mod}`logging.Handler` objects can be provided via the `handler_factory` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of {mod}`logging.Handler`s to ``handler_factory``. + +We provide some utilities to help: + +```{eval-rst} +.. autofunction:: torchrunx.file_handler +``` + +```{eval-rst} +.. autofunction:: torchrunx.stream_handler +``` + +```{eval-rst} +.. 
autofunction:: torchrunx.add_filter_to_handler +``` diff --git a/docs/source/advanced.rst b/docs/source/advanced.rst deleted file mode 100644 index bee131a9..00000000 --- a/docs/source/advanced.rst +++ /dev/null @@ -1,114 +0,0 @@ -Advanced Usage -============== - -Multiple functions in one script --------------------------------- - -We could also launch multiple functions (e.g. train on many GPUs, test on one GPU): - -.. code-block:: python - - import torchrunx as trx - - trained_model = trx.launch( - func=train, - hostnames=["node1", "node2"], - workers_per_host=8 - ).rank(0) - - accuracy = trx.launch( - func=test, - func_args=(trained_model,), - hostnames=["localhost"], - workers_per_host=1 - ).rank(0) - - print(f'Accuracy: {accuracy}') - - -:mod:`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. - -Launcher class --------------- - -We provide the :mod:`torchrunx.Launcher` class as an alias to :mod:`torchrunx.launch`. - -.. autoclass:: torchrunx.Launcher - :members: - -CLI integration -^^^^^^^^^^^^^^^ - -We can use :mod:`torchrunx.Launcher` to populate arguments from the CLI (e.g. with `tyro `_): - -.. code:: python - - import torchrunx as trx - import tyro - - def distributed_function(): - pass - - if __name__ == "__main__": - launcher = tyro.cli(trx.Launcher) - launcher.run(distributed_function) - -``python ... --help`` then results in: - -.. code:: bash - - ╭─ options ─────────────────────────────────────────────╮ - │ -h, --help show this help message and exit │ - │ --hostnames {[STR [STR ...]]}|{auto,slurm} │ - │ (default: auto) │ - │ --workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ - │ (default: auto) │ - │ --ssh-config-file {None}|STR|PATH │ - │ (default: None) │ - │ --backend {None,nccl,gloo,mpi,ucc,auto} │ - │ (default: auto) │ - │ --timeout INT (default: 600) │ - │ --default-env-vars [STR [STR ...]] │ - │ (default: PATH LD_LIBRARY ...) 
│ - │ --extra-env-vars [STR [STR ...]] │ - │ (default: ) │ - │ --env-file {None}|STR|PATH │ - │ (default: None) │ - ╰───────────────────────────────────────────────────────╯ - -SLURM integration ------------------ - -By default, the ``hostnames`` or ``workers_per_host`` arguments are populated from the current SLURM allocation. If no allocation is detected, we assume 1 machine (localhost) with N workers (num. GPUs or CPUs). -Raises a ``RuntimeError`` if ``hostnames="slurm"`` or ``workers_per_host="slurm"`` but no allocation is detected. - -Propagating exceptions ----------------------- - -Exceptions that are raised in workers will be raised by the launcher process. - -A :mod:`torchrunx.AgentFailedError` or :mod:`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). - -Environment variables ---------------------- - -Environment variables in the launcher process that match the ``default_env_vars`` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using ``fnmatch``. - -``default_env_vars`` can be overriden if desired. This list can be augmented using ``extra_env_vars``. Additional environment variables (and more custom bash logic) can be included via the ``env_file`` argument. Our agents ``source`` this file. - -We also set the following environment variables in each worker: ``LOCAL_RANK``, ``RANK``, ``LOCAL_WORLD_SIZE``, ``WORLD_SIZE``, ``MASTER_ADDR``, and ``MASTER_PORT``. - -Custom logging --------------- - -We forward all logs (i.e. from :mod:`logging` and :mod:`sys.stdout`/:mod:`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's ``stdout`` stream. 
Logs from all agents and workers are written to files in ``$TORCHRUNX_LOG_DIR`` (default: ``./torchrunx_logs``) and are named by timestamp, hostname, and local_rank. - -:mod:`logging.Handler` objects can be provided via the ``handler_factory`` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of :mod:`logging.Handler`s to ``handler_factory``. - -We provide some utilities to help: - -.. autofunction:: torchrunx.file_handler - -.. autofunction:: torchrunx.stream_handler - -.. autofunction:: torchrunx.add_filter_to_handler diff --git a/docs/source/api.rst b/docs/source/api.md similarity index 71% rename from docs/source/api.rst rename to docs/source/api.md index 518b323f..1025621d 100644 --- a/docs/source/api.rst +++ b/docs/source/api.md @@ -1,11 +1,18 @@ -API -============= +# API +```{eval-rst} .. autofunction:: torchrunx.launch(func: Callable, ...) +``` +```{eval-rst} .. autoclass:: torchrunx.LaunchResult :members: +``` +```{eval-rst} .. autoclass:: torchrunx.AgentFailedError +``` +```{eval-rst} .. autoclass:: torchrunx.WorkerFailedError +``` diff --git a/docs/source/conf.py b/docs/source/conf.py index 8e64563c..65bd522c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -12,13 +12,12 @@ extensions = [ "sphinx.ext.duration", - "sphinx.ext.autodoc", "sphinx.ext.intersphinx", + "sphinx-autodoc2", "myst_parser", "sphinx_toolbox.sidebar_links", "sphinx_toolbox.github", "sphinx.ext.napoleon", - "sphinx.ext.autodoc.typehints", "sphinx.ext.linkcode", ] diff --git a/docs/source/contributing.rst b/docs/source/contributing.md similarity index 78% rename from docs/source/contributing.rst rename to docs/source/contributing.md index 7a661a30..5d3d3c56 100644 --- a/docs/source/contributing.rst +++ b/docs/source/contributing.md @@ -1,2 +1,4 @@ +```{eval-rst} .. 
include:: ../../CONTRIBUTING.md :parser: myst_parser.sphinx_ +``` diff --git a/docs/source/how_it_works.md b/docs/source/how_it_works.md new file mode 100644 index 00000000..4062e87d --- /dev/null +++ b/docs/source/how_it_works.md @@ -0,0 +1,11 @@ +# How it works + +If you want to (e.g.) train your model on several machines with **N** GPUs each, you should run your training function in **N** parallel processes on each machine. During training, each of these processes runs the same training code (i.e. your function) and communicate with each other (e.g. to synchronize gradients) using a [distributed process group](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group). + +Your script can call our library (via `mod:torchrunx.launch`) and specify a function to distribute. The main process running your script is henceforth known as the **launcher** process. + +Our launcher process spawns an **agent** process (via SSH) on each machine. Each agent then spawns **N** processes (known as **workers**) on its machine. All workers form a process group (with the specified `mod:torchrunx.launch` `backend`) and run your function in parallel. + +**Agent–Worker Communication.** Our agents poll their workers every second and time-out if unresponsive for 5 seconds. Upon polling, our agents receive `None` (if the worker is still running) or a [RunProcsResult](https://pytorch.org/docs/stable/elastic/multiprocessing.html#torch.distributed.elastic.multiprocessing.api.RunProcsResult), indicating that the workers have either completed (providing an object returned from or the exception raised by our function) or failed (e.g. due to segmentation fault or OS signal). + +**Launcher–Agent Communication.** The launcher and agents form a distributed group (with the CPU-based [GLOO backend](https://pytorch.org/docs/stable/distributed.html#backends)) for the communication purposes of our library. 
Our agents synchronize their own "statuses" with each other and the launcher. An agent's status can include whether it is running/failed/completed and the result of the function. If the launcher or any agent fails to synchronize, all raise a `mod:torchrunx.AgentFailedError` and terminate. If any worker fails or raises an exception, the launcher raises a `mod:torchrunx.WorkerFailedError` or that exception and terminates along with all the agents. If all agents succeed, the launcher returns the objects returned by each worker. diff --git a/docs/source/how_it_works.rst b/docs/source/how_it_works.rst deleted file mode 100644 index 9550e8d1..00000000 --- a/docs/source/how_it_works.rst +++ /dev/null @@ -1,12 +0,0 @@ -How it works -============ - -If you want to (e.g.) train your model on several machines with **N** GPUs each, you should run your training function in **N** parallel processes on each machine. During training, each of these processes runs the same training code (i.e. your function) and communicate with each other (e.g. to synchronize gradients) using a `distributed process group `_. - -Your script can call our library (via `mod:torchrunx.launch`) and specify a function to distribute. The main process running your script is henceforth known as the **launcher** process. - -Our launcher process spawns an **agent** process (via SSH) on each machine. Each agent then spawns **N** processes (known as **workers**) on its machine. All workers form a process group (with the specified `mod:torchrunx.launch` ``backend``) and run your function in parallel. - -**Agent–Worker Communication.** Our agents poll their workers every second and time-out if unresponsive for 5 seconds. Upon polling, our agents receive ``None`` (if the worker is still running) or a `RunProcsResult `_, indicating that the workers have either completed (providing an object returned from or the exception raised by our function) or failed (e.g. due to segmentation fault or OS signal). 
- -**Launcher–Agent Communication.** The launcher and agents form a distributed group (with the CPU-based `GLOO backend `_) for the communication purposes of our library. Our agents synchronize their own "statuses" with each other and the launcher. An agent's status can include whether it is running/failed/completed and the result of the function. If the launcher or any agent fails to synchronize, all raise a `mod:torchrunx.AgentFailedError` and terminate. If any worker fails or raises an exception, the launcher raises a `mod:torchrunx.WorkerFailedError` or that exception and terminates along with all the agents. If all agents succeed, the launcher returns the objects returned by each worker. diff --git a/docs/source/index.md b/docs/source/index.md new file mode 100644 index 00000000..00e2b62c --- /dev/null +++ b/docs/source/index.md @@ -0,0 +1,20 @@ +```{eval-rst} +.. include:: ../../README.md + :parser: myst_parser.sphinx_ +``` + +```{toctree} +:hidden: true +:maxdepth: 1 + +api +advanced +how_it_works +contributing +``` + +```{eval-rst} +.. sidebar-links:: + :github: + :pypi: torchrunx +``` diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 55900595..00000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. include:: ../../README.md - :parser: myst_parser.sphinx_ - -.. toctree:: - :hidden: - :maxdepth: 1 - - api - advanced - how_it_works - contributing - -.. 
sidebar-links:: - :github: - :pypi: torchrunx From 5d30173087b12447af437b7744a9490151392857 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 02:25:23 -0500 Subject: [PATCH 010/141] added sphinx-autodoc2 as docs req --- docs/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index 06ac352c..45247e57 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,3 +2,4 @@ sphinx==6.2.1 furo myst-parser sphinx-toolbox +sphinx-autodoc2 From e68d126ae895a7348d14657aef94fcf819c57dba Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 02:28:15 -0500 Subject: [PATCH 011/141] update license in citation.cff --- CITATION.cff | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index dcd820c1..0ba3bbe0 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -9,6 +9,6 @@ authors: family-names: Curtin email: peter_curtin@brown.edu repository-code: 'https://github.com/apoorvkh/torchrunx' -url: torchrunx.readthedocs.io -license: MIT +url: 'https://torchrunx.readthedocs.io' +license: GPL-3.0 year: 2024 From cade0796237711cd9c46c4d6b971cb7b48fa9170 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 02:32:00 -0500 Subject: [PATCH 012/141] fix autodoc2 import --- docs/source/conf.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 65bd522c..84d5fa87 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -13,7 +13,7 @@ extensions = [ "sphinx.ext.duration", "sphinx.ext.intersphinx", - "sphinx-autodoc2", + "autodoc2", "myst_parser", "sphinx_toolbox.sidebar_links", "sphinx_toolbox.github", @@ -21,9 +21,10 @@ "sphinx.ext.linkcode", ] -autodoc_mock_imports = ["torch", "fabric", "cloudpickle", "sys", "logging", "typing_extensions"] -autodoc_typehints = "both" -autodoc_typehints_description_target = "documented_params" +autodoc2_packages = [ + "../../src", +] 
+autodoc2_render_plugin = "myst" maximum_signature_line_length = 100 From 4e8c4caa7502a4fba34a9eceae5e65289d6fbb47 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 02:33:57 -0500 Subject: [PATCH 013/141] update autodoc2_packages --- docs/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 84d5fa87..39aac572 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -22,7 +22,7 @@ ] autodoc2_packages = [ - "../../src", + "../../src/torchrunx", ] autodoc2_render_plugin = "myst" From d4377d51f3a0cfb1974ca8158da3e409655d9793 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 03:34:38 -0500 Subject: [PATCH 014/141] update readthedocs deps --- docs/.readthedocs.yaml | 5 ++++- docs/requirements.txt | 5 ----- pyproject.toml | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) delete mode 100644 docs/requirements.txt diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 036741ce..69bb4930 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -10,4 +10,7 @@ sphinx: python: install: - - requirements: docs/requirements.txt + - method: pip + path: . 
+ extra_requirements: + - docs diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 45247e57..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -sphinx==6.2.1 -furo -myst-parser -sphinx-toolbox -sphinx-autodoc2 diff --git a/pyproject.toml b/pyproject.toml index 8598d58c..e122b154 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,7 @@ dependencies = [ [dependency-groups] dev = ["ruff", "pyright", "pytest", "build", "twine"] dev-extras = ["submitit", "transformers"] +docs = ["sphinx==6.2.1", "furo", "myst-parser", "sphinx-autodoc2", "sphinx-toolbox"] [tool.uv] managed = true From 037b42dddd12559e19d76ea1e3cf03a17b98a295 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 03:56:10 -0500 Subject: [PATCH 015/141] update uv lock --- docs/.readthedocs.yaml | 13 +- uv.lock | 620 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 618 insertions(+), 15 deletions(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 69bb4930..98530865 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -4,13 +4,12 @@ build: os: ubuntu-24.04 tools: python: "3.9" + commands: + - asdf plugin add uv + - asdf install uv latest + - asdf global uv latest + - uv sync --extra docs --frozen + - uv run -m sphinx -T -b html -d docs/_build/doctrees -D language=en docs $READTHEDOCS_OUTPUT/html sphinx: configuration: docs/source/conf.py - -python: - install: - - method: pip - path: . 
- extra_requirements: - - docs diff --git a/uv.lock b/uv.lock index 07108f62..fd5adfc4 100644 --- a/uv.lock +++ b/uv.lock @@ -2,7 +2,78 @@ version = 1 requires-python = ">=3.9" resolution-markers = [ "python_full_version < '3.12'", - "python_full_version >= '3.12'", + "python_full_version == '3.12.*'", + "python_full_version >= '3.13'", +] + +[[package]] +name = "alabaster" +version = "0.7.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511 }, +] + +[[package]] +name = "apeye" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "apeye-core" }, + { name = "domdf-python-tools" }, + { name = "platformdirs" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4f/6b/cc65e31843d7bfda8313a9dc0c77a21e8580b782adca53c7cb3e511fe023/apeye-1.4.1.tar.gz", hash = "sha256:14ea542fad689e3bfdbda2189a354a4908e90aee4bf84c15ab75d68453d76a36", size = 99219 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/7b/2d63664777b3e831ac1b1d8df5bbf0b7c8bee48e57115896080890527b1b/apeye-1.4.1-py3-none-any.whl", hash = "sha256:44e58a9104ec189bf42e76b3a7fe91e2b2879d96d48e9a77e5e32ff699c9204e", size = 107989 }, +] + +[[package]] +name = "apeye-core" +version = "1.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "domdf-python-tools" }, + { name = "idna" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e5/4c/4f108cfd06923bd897bf992a6ecb6fb122646ee7af94d7f9a64abd071d4c/apeye_core-1.1.5.tar.gz", hash = "sha256:5de72ed3d00cc9b20fea55e54b7ab8f5ef8500eb33a5368bc162a5585e238a55", size = 96511 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/9f/fa9971d2a0c6fef64c87ba362a493a4f230eff4ea8dfb9f4c7cbdf71892e/apeye_core-1.1.5-py3-none-any.whl", hash = "sha256:dc27a93f8c9e246b3b238c5ea51edf6115ab2618ef029b9f2d9a190ec8228fbf", size = 99286 }, +] + +[[package]] +name = "astroid" +version = "3.3.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/1e/326fb1d3d83a3bb77c9f9be29d31f2901e35acb94b0605c3f2e5085047f9/astroid-3.3.5.tar.gz", hash = "sha256:5cfc40ae9f68311075d27ef68a4841bdc5cc7f6cf86671b49f00607d30188e2d", size = 397229 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/30/624365383fa4a40329c0f0bbbc151abc4a64e30dfc110fc8f6e2afcd02bb/astroid-3.3.5-py3-none-any.whl", hash = "sha256:a9d1c946ada25098d790e079ba2a1b112157278f3fb7e718ae6a9252f5835dc8", size = 274586 }, +] + +[[package]] +name = "autodocsumm" +version = "0.2.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/03/96/92afe8a7912b327c01f0a8b6408c9556ee13b1aba5b98d587ac7327ff32d/autodocsumm-0.2.14.tar.gz", hash = "sha256:2839a9d4facc3c4eccd306c08695540911042b46eeafcdc3203e6d0bab40bc77", size = 46357 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/bc/3f66af9beb683728e06ca08797e4e9d3e44f432f339718cae3ba856a9cad/autodocsumm-0.2.14-py3-none-any.whl", hash = "sha256:3bad8717fc5190802c60392a7ab04b9f3c97aa9efa8b3780b3d81d615bfe5dc0", size = 14640 }, +] + +[[package]] +name = "babel" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", size = 9348104 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, ] [[package]] @@ -48,6 +119,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8b/79/76a139d1b9f11aa4afcb7ceb882d2e81003667681711f2fe8a302c4c10ca/bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184", size = 274081 }, ] +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/ca/824b1195773ce6166d388573fc106ce56d4a805bd7427b624e063596ec58/beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", size = 581181 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, +] + [[package]] name = "build" version = "1.2.2.post1" @@ -64,6 +147,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/84/c2/80633736cd183ee4a62107413def345f7e6e3c01563dbca1417363cf957e/build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5", size = 22950 }, ] +[[package]] +name = "cachecontrol" +version = "0.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "msgpack" }, + { name = 
"requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d2/23/db12e0b6b241e33f77f7cce01a06b4cc6f8071728656cc0ea262d2a14dad/cachecontrol-0.14.1.tar.gz", hash = "sha256:06ef916a1e4eb7dba9948cdfc9c76e749db2e02104a9a1277e8b642591a0f717", size = 28928 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/aa/481eb52af52aae093c61c181f2308779973ffd6f0f5f6c0881b2138f3087/cachecontrol-0.14.1-py3-none-any.whl", hash = "sha256:65e3abd62b06382ce3894df60dde9e0deb92aeb734724f68fa4f3b91e97206b9", size = 22085 }, +] + +[package.optional-dependencies] +filecache = [ + { name = "filelock" }, +] + [[package]] name = "certifi" version = "2024.8.30" @@ -281,6 +382,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/21/ea/6c38ca546d5b6dab3874c2b8fc6b1739baac29bacdea31a8c6c0513b3cfa/cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff", size = 2989787 }, ] +[[package]] +name = "cssutils" +version = "2.11.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/9f/329d26121fe165be44b1dfff21aa0dc348f04633931f1d20ed6cf448a236/cssutils-2.11.1.tar.gz", hash = "sha256:0563a76513b6af6eebbe788c3bf3d01c920e46b3f90c8416738c5cfc773ff8e2", size = 711657 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/ec/bb273b7208c606890dc36540fe667d06ce840a6f62f9fae7e658fcdc90fb/cssutils-2.11.1-py3-none-any.whl", hash = "sha256:a67bfdfdff4f3867fab43698ec4897c1a828eca5973f4073321b3bccaf1199b1", size = 385747 }, +] + [[package]] name = "decorator" version = "5.1.1" @@ -302,13 +415,39 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/8d/778b7d51b981a96554f29136cd59ca7880bf58094338085bcf2a979a0e6a/Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c", size = 9561 }, ] +[[package]] 
+name = "dict2css" +version = "0.3.0.post1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cssutils" }, + { name = "domdf-python-tools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/eb/776eef1f1aa0188c0fc165c3a60b71027539f71f2eedc43ad21b060e9c39/dict2css-0.3.0.post1.tar.gz", hash = "sha256:89c544c21c4ca7472c3fffb9d37d3d926f606329afdb751dc1de67a411b70719", size = 7845 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/47/290daabcf91628f4fc0e17c75a1690b354ba067066cd14407712600e609f/dict2css-0.3.0.post1-py3-none-any.whl", hash = "sha256:f006a6b774c3e31869015122ae82c491fd25e7de4a75607a62aa3e798f837e0d", size = 25647 }, +] + [[package]] name = "docutils" -version = "0.21.2" +version = "0.19" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/330ea8d383eb2ce973df34d1239b3b21e91cd8c865d21ff82902d952f91f/docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6", size = 2056383 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/69/e391bd51bc08ed9141ecd899a0ddb61ab6465309f1eb470905c0c8868081/docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc", size = 570472 }, +] + +[[package]] +name = "domdf-python-tools" +version = "3.9.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +dependencies = [ + { name = "natsort" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6b/78/974e10c583ba9d2302e748c9585313a7f2c7ba00e4f600324f432e38fe68/domdf_python_tools-3.9.0.tar.gz", hash = 
"sha256:1f8a96971178333a55e083e35610d7688cd7620ad2b99790164e1fc1a3614c18", size = 103792 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, + { url = "https://files.pythonhosted.org/packages/de/e9/7447a88b217650a74927d3444a89507986479a69b83741900eddd34167fe/domdf_python_tools-3.9.0-py3-none-any.whl", hash = "sha256:4e1ef365cbc24627d6d1e90cf7d46d8ab8df967e1237f4a26885f6986c78872e", size = 127106 }, ] [[package]] @@ -353,6 +492,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c6/b2/454d6e7f0158951d8a78c2e1eb4f69ae81beb8dca5fee9809c6c99e9d0d0/fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871", size = 179641 }, ] +[[package]] +name = "furo" +version = "2024.8.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "pygments" }, + { name = "sphinx" }, + { name = "sphinx-basic-ng" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/e2/d351d69a9a9e4badb4a5be062c2d0e87bd9e6c23b5e57337fef14bef34c8/furo-2024.8.6.tar.gz", hash = "sha256:b63e4cee8abfc3136d3bc03a3d45a76a850bada4d6374d24c1716b0e01394a01", size = 1661506 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/48/e791a7ed487dbb9729ef32bb5d1af16693d8925f4366befef54119b2e576/furo-2024.8.6-py3-none-any.whl", hash = "sha256:6cd97c58b47813d3619e63e9081169880fbe331f0ca883c871ff1f3f11814f5c", size = 341333 }, +] + +[[package]] +name = "html5lib" +version = "1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/b6/b55c3f49042f1df3dcd422b7f224f939892ee94f22abcf503a9b7339eaf2/html5lib-1.1.tar.gz", hash = 
"sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f", size = 272215 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/dd/a834df6482147d48e225a49515aabc28974ad5a4ca3215c18a882565b028/html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d", size = 112173 }, +] + [[package]] name = "huggingface-hub" version = "0.26.2" @@ -380,6 +547,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, ] +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, +] + [[package]] name = "importlib-metadata" version = "8.5.0" @@ -565,6 +741,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506 }, ] +[[package]] +name = "mdit-py-plugins" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/03/a2ecab526543b152300717cf232bb4bb8605b6edb946c845016fa9c9c9fd/mdit_py_plugins-0.4.2.tar.gz", hash = 
"sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5", size = 43542 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/f7/7782a043553ee469c1ff49cfa1cdace2d6bf99a1f333cf38676b3ddf30da/mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636", size = 55316 }, +] + [[package]] name = "mdurl" version = "0.1.2" @@ -592,6 +780,95 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, ] +[[package]] +name = "msgpack" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/d0/7555686ae7ff5731205df1012ede15dd9d927f6227ea151e901c7406af4f/msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e", size = 167260 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/f9/a892a6038c861fa849b11a2bb0502c07bc698ab6ea53359e5771397d883b/msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd", size = 150428 }, + { url = "https://files.pythonhosted.org/packages/df/7a/d174cc6a3b6bb85556e6a046d3193294a92f9a8e583cdbd46dc8a1d7e7f4/msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d", size = 84131 }, + { url = "https://files.pythonhosted.org/packages/08/52/bf4fbf72f897a23a56b822997a72c16de07d8d56d7bf273242f884055682/msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5", size = 81215 }, + { url = 
"https://files.pythonhosted.org/packages/02/95/dc0044b439b518236aaf012da4677c1b8183ce388411ad1b1e63c32d8979/msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5", size = 371229 }, + { url = "https://files.pythonhosted.org/packages/ff/75/09081792db60470bef19d9c2be89f024d366b1e1973c197bb59e6aabc647/msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e", size = 378034 }, + { url = "https://files.pythonhosted.org/packages/32/d3/c152e0c55fead87dd948d4b29879b0f14feeeec92ef1fd2ec21b107c3f49/msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b", size = 363070 }, + { url = "https://files.pythonhosted.org/packages/d9/2c/82e73506dd55f9e43ac8aa007c9dd088c6f0de2aa19e8f7330e6a65879fc/msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f", size = 359863 }, + { url = "https://files.pythonhosted.org/packages/cb/a0/3d093b248837094220e1edc9ec4337de3443b1cfeeb6e0896af8ccc4cc7a/msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68", size = 368166 }, + { url = "https://files.pythonhosted.org/packages/e4/13/7646f14f06838b406cf5a6ddbb7e8dc78b4996d891ab3b93c33d1ccc8678/msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b", size = 370105 }, + { url = "https://files.pythonhosted.org/packages/67/fa/dbbd2443e4578e165192dabbc6a22c0812cda2649261b1264ff515f19f15/msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044", size = 68513 }, + { url = 
"https://files.pythonhosted.org/packages/24/ce/c2c8fbf0ded750cb63cbcbb61bc1f2dfd69e16dca30a8af8ba80ec182dcd/msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f", size = 74687 }, + { url = "https://files.pythonhosted.org/packages/b7/5e/a4c7154ba65d93be91f2f1e55f90e76c5f91ccadc7efc4341e6f04c8647f/msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7", size = 150803 }, + { url = "https://files.pythonhosted.org/packages/60/c2/687684164698f1d51c41778c838d854965dd284a4b9d3a44beba9265c931/msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa", size = 84343 }, + { url = "https://files.pythonhosted.org/packages/42/ae/d3adea9bb4a1342763556078b5765e666f8fdf242e00f3f6657380920972/msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701", size = 81408 }, + { url = "https://files.pythonhosted.org/packages/dc/17/6313325a6ff40ce9c3207293aee3ba50104aed6c2c1559d20d09e5c1ff54/msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6", size = 396096 }, + { url = "https://files.pythonhosted.org/packages/a8/a1/ad7b84b91ab5a324e707f4c9761633e357820b011a01e34ce658c1dda7cc/msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59", size = 403671 }, + { url = "https://files.pythonhosted.org/packages/bb/0b/fd5b7c0b308bbf1831df0ca04ec76fe2f5bf6319833646b0a4bd5e9dc76d/msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0", size = 387414 }, + { url = 
"https://files.pythonhosted.org/packages/f0/03/ff8233b7c6e9929a1f5da3c7860eccd847e2523ca2de0d8ef4878d354cfa/msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e", size = 383759 }, + { url = "https://files.pythonhosted.org/packages/1f/1b/eb82e1fed5a16dddd9bc75f0854b6e2fe86c0259c4353666d7fab37d39f4/msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6", size = 394405 }, + { url = "https://files.pythonhosted.org/packages/90/2e/962c6004e373d54ecf33d695fb1402f99b51832631e37c49273cc564ffc5/msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5", size = 396041 }, + { url = "https://files.pythonhosted.org/packages/f8/20/6e03342f629474414860c48aeffcc2f7f50ddaf351d95f20c3f1c67399a8/msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88", size = 68538 }, + { url = "https://files.pythonhosted.org/packages/aa/c4/5a582fc9a87991a3e6f6800e9bb2f3c82972912235eb9539954f3e9997c7/msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788", size = 74871 }, + { url = "https://files.pythonhosted.org/packages/e1/d6/716b7ca1dbde63290d2973d22bbef1b5032ca634c3ff4384a958ec3f093a/msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d", size = 152421 }, + { url = "https://files.pythonhosted.org/packages/70/da/5312b067f6773429cec2f8f08b021c06af416bba340c912c2ec778539ed6/msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2", size = 85277 }, + { url = 
"https://files.pythonhosted.org/packages/28/51/da7f3ae4462e8bb98af0d5bdf2707f1b8c65a0d4f496e46b6afb06cbc286/msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420", size = 82222 }, + { url = "https://files.pythonhosted.org/packages/33/af/dc95c4b2a49cff17ce47611ca9ba218198806cad7796c0b01d1e332c86bb/msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2", size = 392971 }, + { url = "https://files.pythonhosted.org/packages/f1/54/65af8de681fa8255402c80eda2a501ba467921d5a7a028c9c22a2c2eedb5/msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39", size = 401403 }, + { url = "https://files.pythonhosted.org/packages/97/8c/e333690777bd33919ab7024269dc3c41c76ef5137b211d776fbb404bfead/msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f", size = 385356 }, + { url = "https://files.pythonhosted.org/packages/57/52/406795ba478dc1c890559dd4e89280fa86506608a28ccf3a72fbf45df9f5/msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247", size = 383028 }, + { url = "https://files.pythonhosted.org/packages/e7/69/053b6549bf90a3acadcd8232eae03e2fefc87f066a5b9fbb37e2e608859f/msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c", size = 391100 }, + { url = "https://files.pythonhosted.org/packages/23/f0/d4101d4da054f04274995ddc4086c2715d9b93111eb9ed49686c0f7ccc8a/msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b", size = 394254 }, + { url = 
"https://files.pythonhosted.org/packages/1c/12/cf07458f35d0d775ff3a2dc5559fa2e1fcd06c46f1ef510e594ebefdca01/msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b", size = 69085 }, + { url = "https://files.pythonhosted.org/packages/73/80/2708a4641f7d553a63bc934a3eb7214806b5b39d200133ca7f7afb0a53e8/msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f", size = 75347 }, + { url = "https://files.pythonhosted.org/packages/c8/b0/380f5f639543a4ac413e969109978feb1f3c66e931068f91ab6ab0f8be00/msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf", size = 151142 }, + { url = "https://files.pythonhosted.org/packages/c8/ee/be57e9702400a6cb2606883d55b05784fada898dfc7fd12608ab1fdb054e/msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330", size = 84523 }, + { url = "https://files.pythonhosted.org/packages/7e/3a/2919f63acca3c119565449681ad08a2f84b2171ddfcff1dba6959db2cceb/msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734", size = 81556 }, + { url = "https://files.pythonhosted.org/packages/7c/43/a11113d9e5c1498c145a8925768ea2d5fce7cbab15c99cda655aa09947ed/msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e", size = 392105 }, + { url = "https://files.pythonhosted.org/packages/2d/7b/2c1d74ca6c94f70a1add74a8393a0138172207dc5de6fc6269483519d048/msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca", size = 399979 }, + { url = 
"https://files.pythonhosted.org/packages/82/8c/cf64ae518c7b8efc763ca1f1348a96f0e37150061e777a8ea5430b413a74/msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915", size = 383816 }, + { url = "https://files.pythonhosted.org/packages/69/86/a847ef7a0f5ef3fa94ae20f52a4cacf596a4e4a010197fbcc27744eb9a83/msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d", size = 380973 }, + { url = "https://files.pythonhosted.org/packages/aa/90/c74cf6e1126faa93185d3b830ee97246ecc4fe12cf9d2d31318ee4246994/msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434", size = 387435 }, + { url = "https://files.pythonhosted.org/packages/7a/40/631c238f1f338eb09f4acb0f34ab5862c4e9d7eda11c1b685471a4c5ea37/msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c", size = 399082 }, + { url = "https://files.pythonhosted.org/packages/e9/1b/fa8a952be252a1555ed39f97c06778e3aeb9123aa4cccc0fd2acd0b4e315/msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc", size = 69037 }, + { url = "https://files.pythonhosted.org/packages/b6/bc/8bd826dd03e022153bfa1766dcdec4976d6c818865ed54223d71f07862b3/msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f", size = 75140 }, + { url = "https://files.pythonhosted.org/packages/f7/3b/544a5c5886042b80e1f4847a4757af3430f60d106d8d43bb7be72c9e9650/msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1", size = 150713 }, + { url = 
"https://files.pythonhosted.org/packages/93/af/d63f25bcccd3d6f06fd518ba4a321f34a4370c67b579ca5c70b4a37721b4/msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48", size = 84277 }, + { url = "https://files.pythonhosted.org/packages/92/9b/5c0dfb0009b9f96328664fecb9f8e4e9c8a1ae919e6d53986c1b813cb493/msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c", size = 81357 }, + { url = "https://files.pythonhosted.org/packages/d1/7c/3a9ee6ec9fc3e47681ad39b4d344ee04ff20a776b594fba92d88d8b68356/msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468", size = 371256 }, + { url = "https://files.pythonhosted.org/packages/f7/0a/8a213cecea7b731c540f25212ba5f9a818f358237ac51a44d448bd753690/msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74", size = 377868 }, + { url = "https://files.pythonhosted.org/packages/1b/94/a82b0db0981e9586ed5af77d6cfb343da05d7437dceaae3b35d346498110/msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846", size = 363370 }, + { url = "https://files.pythonhosted.org/packages/93/fc/6c7f0dcc1c913e14861e16eaf494c07fc1dde454ec726ff8cebcf348ae53/msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346", size = 358970 }, + { url = "https://files.pythonhosted.org/packages/1f/c6/e4a04c0089deace870dabcdef5c9f12798f958e2e81d5012501edaff342f/msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b", size = 366358 }, + { url = 
"https://files.pythonhosted.org/packages/b6/54/7d8317dac590cf16b3e08e3fb74d2081e5af44eb396f0effa13f17777f30/msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8", size = 370336 }, + { url = "https://files.pythonhosted.org/packages/dc/6f/a5a1f43b6566831e9630e5bc5d86034a8884386297302be128402555dde1/msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd", size = 68683 }, + { url = "https://files.pythonhosted.org/packages/5f/e8/2162621e18dbc36e2bc8492fd0e97b3975f5d89fe0472ae6d5f7fbdd8cf7/msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325", size = 74787 }, +] + +[[package]] +name = "myst-parser" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "jinja2" }, + { name = "markdown-it-py" }, + { name = "mdit-py-plugins" }, + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/64/e2f13dac02f599980798c01156393b781aec983b52a6e4057ee58f07c43a/myst_parser-3.0.1.tar.gz", hash = "sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87", size = 92392 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/de/21aa8394f16add8f7427f0a1326ccd2b3a2a8a3245c9252bc5ac034c6155/myst_parser-3.0.1-py3-none-any.whl", hash = "sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1", size = 83163 }, +] + +[[package]] +name = "natsort" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/a9/a0c57aee75f77794adaf35322f8b6404cbd0f89ad45c87197a937764b7d0/natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581", size = 76575 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ef/82/7a9d0550484a62c6da82858ee9419f3dd1ccc9aa1c26a1e43da3ecd20b0d/natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c", size = 38268 }, +] + [[package]] name = "networkx" version = "3.2.1" @@ -837,6 +1114,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/56/09/054aea9b7534a15ad38a363a2bd974c20646ab1582a387a95b8df1bfea1c/pkginfo-1.10.0-py3-none-any.whl", hash = "sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097", size = 30392 }, ] +[[package]] +name = "platformdirs" +version = "4.3.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, +] + [[package]] name = "pluggy" version = "1.5.0" @@ -987,16 +1273,16 @@ wheels = [ [[package]] name = "readme-renderer" -version = "44.0" +version = "43.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docutils" }, { name = "nh3" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056 } +sdist = { url = "https://files.pythonhosted.org/packages/fe/b5/536c775084d239df6345dccf9b043419c7e3308bc31be4c7882196abc62e/readme_renderer-43.0.tar.gz", hash = "sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311", size = 31768 } wheels 
= [ - { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310 }, + { url = "https://files.pythonhosted.org/packages/45/be/3ea20dc38b9db08387cf97997a85a7d51527ea2057d71118feb0aa8afa55/readme_renderer-43.0-py3-none-any.whl", hash = "sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9", size = 13301 }, ] [[package]] @@ -1134,6 +1420,66 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 }, ] +[[package]] +name = "ruamel-yaml" +version = "0.18.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ruamel-yaml-clib", marker = "python_full_version < '3.13' and platform_python_implementation == 'CPython'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/81/4dfc17eb6ebb1aac314a3eb863c1325b907863a1b8b1382cdffcb6ac0ed9/ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b", size = 143362 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/67/8ece580cc363331d9a53055130f86b096bf16e38156e33b1d3014fffda6b/ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636", size = 117761 }, +] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/84/80203abff8ea4993a87d823a5f632e4d92831ef75d404c9fc78d0176d2b5/ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f", size = 225315 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/70/57/40a958e863e299f0c74ef32a3bde9f2d1ea8d69669368c0c502a0997f57f/ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5", size = 131301 }, + { url = "https://files.pythonhosted.org/packages/98/a8/29a3eb437b12b95f50a6bcc3d7d7214301c6c529d8fdc227247fa84162b5/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969", size = 633728 }, + { url = "https://files.pythonhosted.org/packages/35/6d/ae05a87a3ad540259c3ad88d71275cbd1c0f2d30ae04c65dcbfb6dcd4b9f/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df", size = 722230 }, + { url = "https://files.pythonhosted.org/packages/7f/b7/20c6f3c0b656fe609675d69bc135c03aac9e3865912444be6339207b6648/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76", size = 686712 }, + { url = "https://files.pythonhosted.org/packages/cd/11/d12dbf683471f888d354dac59593873c2b45feb193c5e3e0f2ebf85e68b9/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6", size = 663936 }, + { url = "https://files.pythonhosted.org/packages/72/14/4c268f5077db5c83f743ee1daeb236269fa8577133a5cfa49f8b382baf13/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd", size = 696580 }, + { url = "https://files.pythonhosted.org/packages/80/29/c0a017b704aaf3cbf704989785cd9c5d5b8ccec2dae6ac0c53833c84e677/ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da", size = 
100326 }, + { url = "https://files.pythonhosted.org/packages/3a/65/fa39d74db4e2d0cd252355732d966a460a41cd01c6353b820a0952432839/ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28", size = 118079 }, + { url = "https://files.pythonhosted.org/packages/fb/8f/683c6ad562f558cbc4f7c029abcd9599148c51c54b5ef0f24f2638da9fbb/ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6", size = 132224 }, + { url = "https://files.pythonhosted.org/packages/3c/d2/b79b7d695e2f21da020bd44c782490578f300dd44f0a4c57a92575758a76/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e", size = 641480 }, + { url = "https://files.pythonhosted.org/packages/68/6e/264c50ce2a31473a9fdbf4fa66ca9b2b17c7455b31ef585462343818bd6c/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e", size = 739068 }, + { url = "https://files.pythonhosted.org/packages/86/29/88c2567bc893c84d88b4c48027367c3562ae69121d568e8a3f3a8d363f4d/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52", size = 703012 }, + { url = "https://files.pythonhosted.org/packages/11/46/879763c619b5470820f0cd6ca97d134771e502776bc2b844d2adb6e37753/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642", size = 704352 }, + { url = "https://files.pythonhosted.org/packages/02/80/ece7e6034256a4186bbe50dee28cd032d816974941a6abf6a9d65e4228a7/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2", size = 737344 }, + { url = "https://files.pythonhosted.org/packages/67/58/b1f60a1d591b771298ffa0428237afb092c7f29ae23bad93420b1eb10703/ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4", size = 100205 }, + { url = "https://files.pythonhosted.org/packages/b4/4f/b52f634c9548a9291a70dfce26ca7ebce388235c93588a1068028ea23fcc/ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb", size = 118185 }, + { url = "https://files.pythonhosted.org/packages/48/41/e7a405afbdc26af961678474a55373e1b323605a4f5e2ddd4a80ea80f628/ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632", size = 133433 }, + { url = "https://files.pythonhosted.org/packages/ec/b0/b850385604334c2ce90e3ee1013bd911aedf058a934905863a6ea95e9eb4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d", size = 647362 }, + { url = "https://files.pythonhosted.org/packages/44/d0/3f68a86e006448fb6c005aee66565b9eb89014a70c491d70c08de597f8e4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c", size = 754118 }, + { url = "https://files.pythonhosted.org/packages/52/a9/d39f3c5ada0a3bb2870d7db41901125dbe2434fa4f12ca8c5b83a42d7c53/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd", size = 706497 }, + { url = "https://files.pythonhosted.org/packages/b0/fa/097e38135dadd9ac25aecf2a54be17ddf6e4c23e43d538492a90ab3d71c6/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", 
hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31", size = 698042 }, + { url = "https://files.pythonhosted.org/packages/ec/d5/a659ca6f503b9379b930f13bc6b130c9f176469b73b9834296822a83a132/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680", size = 745831 }, + { url = "https://files.pythonhosted.org/packages/b1/82/85cb92f15a4231c89b95dfe08b09eb6adca929ef7df7e17ab59902b6f589/ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5", size = 98777 }, + { url = "https://files.pythonhosted.org/packages/d7/8f/c3654f6f1ddb75daf3922c3d8fc6005b1ab56671ad56ffb874d908bfa668/ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4", size = 115523 }, + { url = "https://files.pythonhosted.org/packages/29/00/4864119668d71a5fa45678f380b5923ff410701565821925c69780356ffa/ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a", size = 132011 }, + { url = "https://files.pythonhosted.org/packages/7f/5e/212f473a93ae78c669ffa0cb051e3fee1139cb2d385d2ae1653d64281507/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475", size = 642488 }, + { url = "https://files.pythonhosted.org/packages/1f/8f/ecfbe2123ade605c49ef769788f79c38ddb1c8fa81e01f4dbf5cf1a44b16/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef", size = 745066 }, + { url = 
"https://files.pythonhosted.org/packages/e2/a9/28f60726d29dfc01b8decdb385de4ced2ced9faeb37a847bd5cf26836815/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6", size = 701785 }, + { url = "https://files.pythonhosted.org/packages/84/7e/8e7ec45920daa7f76046578e4f677a3215fe8f18ee30a9cb7627a19d9b4c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf", size = 693017 }, + { url = "https://files.pythonhosted.org/packages/c5/b3/d650eaade4ca225f02a648321e1ab835b9d361c60d51150bac49063b83fa/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1", size = 741270 }, + { url = "https://files.pythonhosted.org/packages/30/8c/ed73f047a73638257aa9377ad356bea4d96125b305c34a28766f4445cc0f/ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6", size = 98583 }, + { url = "https://files.pythonhosted.org/packages/b0/85/e8e751d8791564dd333d5d9a4eab0a7a115f7e349595417fd50ecae3395c/ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3", size = 115190 }, + { url = "https://files.pythonhosted.org/packages/e5/46/ccdef7a84ad745c37cb3d9a81790f28fbc9adf9c237dba682017b123294e/ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987", size = 131834 }, + { url = "https://files.pythonhosted.org/packages/29/09/932360f30ad1b7b79f08757e0a6fb8c5392a52cdcc182779158fe66d25ac/ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45", size = 636120 }, + { url = 
"https://files.pythonhosted.org/packages/a2/2a/5b27602e7a4344c1334e26bf4739746206b7a60a8acdba33a61473468b73/ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519", size = 724914 }, + { url = "https://files.pythonhosted.org/packages/da/1c/23497017c554fc06ff5701b29355522cff850f626337fff35d9ab352cb18/ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7", size = 689072 }, + { url = "https://files.pythonhosted.org/packages/68/e6/f3d4ff3223f9ea49c3b7169ec0268e42bd49f87c70c0e3e853895e4a7ae2/ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285", size = 667091 }, + { url = "https://files.pythonhosted.org/packages/84/62/ead07043527642491e5011b143f44b81ef80f1025a96069b7210e0f2f0f3/ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed", size = 699111 }, + { url = "https://files.pythonhosted.org/packages/6e/b3/7feb99a00bfaa5c6868617bb7651308afde85e5a0b23cd187fe5de65feeb/ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12", size = 100863 }, + { url = "https://files.pythonhosted.org/packages/93/07/de635108684b7a5bb06e432b0930c5a04b6c59efe73bd966d8db3cc208f2/ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b", size = 118653 }, +] + [[package]] name = "ruff" version = "0.7.3" @@ -1261,6 +1607,232 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/21/df/7c6bb83dcb45b35dc35b310d752f254211cde0bcd2a35290ea6e2862b2a9/setuptools-75.4.0-py3-none-any.whl", hash = 
"sha256:b3c5d862f98500b06ffdf7cc4499b48c46c317d8d56cb30b5c8bce4d88f5c216", size = 1223131 }, ] +[[package]] +name = "six" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", size = 34041 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254", size = 11053 }, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1", size = 86699 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a", size = 93002 }, +] + +[[package]] +name = "soupsieve" +version = "2.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/ce/fbaeed4f9fb8b2daa961f90591662df6a86c1abf25c548329a86920aedfb/soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb", size = 101569 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 }, +] + +[[package]] +name = "sphinx" +version = 
"6.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alabaster" }, + { name = "babel" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docutils" }, + { name = "imagesize" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jinja2" }, + { name = "packaging" }, + { name = "pygments" }, + { name = "requests" }, + { name = "snowballstemmer" }, + { name = "sphinxcontrib-applehelp" }, + { name = "sphinxcontrib-devhelp" }, + { name = "sphinxcontrib-htmlhelp" }, + { name = "sphinxcontrib-jsmath" }, + { name = "sphinxcontrib-qthelp" }, + { name = "sphinxcontrib-serializinghtml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/6d/392defcc95ca48daf62aecb89550143e97a4651275e62a3d7755efe35a3a/Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b", size = 6681092 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/d8/45ba6097c39ba44d9f0e1462fb232e13ca4ddb5aea93a385dcfa964687da/sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912", size = 3024615 }, +] + +[[package]] +name = "sphinx-autodoc-typehints" +version = "1.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/30/9764a2c735c655c3065f32072fb3d8c6fd5dda8df294d4e9f05670d60e31/sphinx_autodoc_typehints-1.23.0.tar.gz", hash = "sha256:5d44e2996633cdada499b6d27a496ddf9dbc95dd1f0f09f7b37940249e61f6e9", size = 35945 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/be/792b64ddacfcff362062077689ce37eb9750b9924fc0a14f623fa71ffaf6/sphinx_autodoc_typehints-1.23.0-py3-none-any.whl", hash = "sha256:ac099057e66b09e51b698058ba7dd76e57e1fe696cd91b54e121d3dad188f91d", size = 17896 }, +] + +[[package]] +name = "sphinx-autodoc2" +version = "0.5.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/5f/5350046d1aa1a56b063ae08b9ad871025335c9d55fe2372896ea48711da9/sphinx_autodoc2-0.5.0.tar.gz", hash = "sha256:7d76044aa81d6af74447080182b6868c7eb066874edc835e8ddf810735b6565a", size = 115077 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/19/e6/48d47961bbdae755ba9c17dfc65d89356312c67668dcb36c87cfadfa1964/sphinx_autodoc2-0.5.0-py3-none-any.whl", hash = "sha256:e867013b1512f9d6d7e6f6799f8b537d6884462acd118ef361f3f619a60b5c9e", size = 43385 }, +] + +[[package]] +name = "sphinx-basic-ng" +version = "1.0.0b2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/dd/018ce05c532a22007ac58d4f45232514cd9d6dd0ee1dc374e309db830983/sphinx_basic_ng-1.0.0b2-py3-none-any.whl", hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b", size = 22496 }, +] + +[[package]] +name = "sphinx-jinja2-compat" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "markupsafe" }, + { name = "standard-imghdr", marker = "python_full_version >= '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/df/27282da6f8c549f765beca9de1a5fc56f9651ed87711a5cac1e914137753/sphinx_jinja2_compat-0.3.0.tar.gz", hash = "sha256:f3c1590b275f42e7a654e081db5e3e5fb97f515608422bde94015ddf795dfe7c", size = 4998 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/6f/42/2fd09d672eaaa937d6893d8b747d07943f97a6e5e30653aee6ebd339b704/sphinx_jinja2_compat-0.3.0-py3-none-any.whl", hash = "sha256:b1e4006d8e1ea31013fa9946d1b075b0c8d2a42c6e3425e63542c1e9f8be9084", size = 7883 }, +] + +[[package]] +name = "sphinx-prompt" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "pygments" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d0/fb/a252124876be4e2eb2a25536c76e817e10d96ecd47b971016b960cb8d726/sphinx_prompt-1.6.0.tar.gz", hash = "sha256:0ae92cdae0962896827e11880d3e2317b436491663e83a8289a9c9c1b7d758ae", size = 5502 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/25/a9252d0ccf84247b602a47cbe1ec6669647971841594ee8a1ed38b7b1c38/sphinx_prompt-1.6.0-py3-none-any.whl", hash = "sha256:a118fc1519f367dfffd73fbc34e1d905e38929dec6a3971518a331bf0689b0df", size = 5225 }, +] + +[[package]] +name = "sphinx-tabs" +version = "3.4.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "pygments" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/32/ab475e252dc2b704e82a91141fa404cdd8901a5cf34958fd22afacebfccd/sphinx-tabs-3.4.5.tar.gz", hash = "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531", size = 16070 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/9f/4ac7dbb9f23a2ff5a10903a4f9e9f43e0ff051f63a313e989c962526e305/sphinx_tabs-3.4.5-py3-none-any.whl", hash = "sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09", size = 9904 }, +] + +[[package]] +name = "sphinx-toolbox" +version = "3.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "apeye" }, + { name = "autodocsumm" }, + { name = "beautifulsoup4" }, + { name = "cachecontrol", extra = ["filecache"] }, + { name = "dict2css" }, + { name = 
"docutils" }, + { name = "domdf-python-tools" }, + { name = "filelock" }, + { name = "html5lib" }, + { name = "ruamel-yaml" }, + { name = "sphinx" }, + { name = "sphinx-autodoc-typehints" }, + { name = "sphinx-jinja2-compat" }, + { name = "sphinx-prompt" }, + { name = "sphinx-tabs" }, + { name = "tabulate" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/80/f837e85c8c216cdeef9b60393e4b00c9092a1e3d734106e0021abbf5930c/sphinx_toolbox-3.8.1.tar.gz", hash = "sha256:a4b39a6ea24fc8f10e24f052199bda17837a0bf4c54163a56f521552395f5e1a", size = 111977 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/d6/2a28ee4cbc158ae65afb2cfcb6895ef54d972ce1e167f8a63c135b14b080/sphinx_toolbox-3.8.1-py3-none-any.whl", hash = "sha256:53d8e77dd79e807d9ef18590c4b2960a5aa3c147415054b04c31a91afed8b88b", size = 194621 }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300 }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530 }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705 }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071 }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743 }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, +] + +[[package]] +name = "standard-imghdr" +version = "3.10.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/d2/2eb5521072c9598886035c65c023f39f7384bcb73eed70794f469e34efac/standard_imghdr-3.10.14.tar.gz", hash = "sha256:2598fe2e7c540dbda34b233295e10957ab8dc8ac6f3bd9eaa8d38be167232e52", size = 5474 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/d0/9852f70eb01f814843530c053542b72d30e9fbf74da7abb0107e71938389/standard_imghdr-3.10.14-py3-none-any.whl", hash = "sha256:cdf6883163349624dee9a81d2853a20260337c4cd41c04e99c082e01833a08e2", size = 5598 }, +] + [[package]] name = "submitit" version = "1.5.2" @@ -1286,6 +1858,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b2/fe/81695a1aa331a842b582453b605175f419fe8540355886031328089d840a/sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8", size = 6189177 }, ] +[[package]] +name = "tabulate" +version = "0.9.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/fe/802052aecb21e3797b8f7902564ab6ea0d60ff8ca23952079064155d1ae1/tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", size = 81090 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252 }, +] + [[package]] name = "tokenizers" version = "0.20.3" @@ -1428,7 +2009,7 @@ wheels = [ [[package]] name = "torchrunx" -version = "0.2.4" +version = "0.3.0" source = { editable = "." } dependencies = [ { name = "cloudpickle" }, @@ -1449,6 +2030,13 @@ dev-extras = [ { name = "submitit" }, { name = "transformers" }, ] +docs = [ + { name = "furo" }, + { name = "myst-parser" }, + { name = "sphinx" }, + { name = "sphinx-autodoc2" }, + { name = "sphinx-toolbox" }, +] [package.metadata] requires-dist = [ @@ -1470,6 +2058,13 @@ dev-extras = [ { name = "submitit" }, { name = "transformers" }, ] +docs = [ + { name = "furo" }, + { name = "myst-parser" }, + { name = "sphinx", specifier = "==6.2.1" }, + { name = "sphinx-autodoc2" }, + { name = "sphinx-toolbox" }, +] [[package]] name = "tqdm" @@ -1556,6 +2151,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, ] +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774 }, +] + [[package]] name = "wrapt" version = "1.16.0" From 5144a1a3e9e9fd9bb07249412d12814dfd2dc46e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 17:32:00 -0500 Subject: [PATCH 016/141] fixed uv sync command? --- docs/.readthedocs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 98530865..b61a7a34 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -8,7 +8,7 @@ build: - asdf plugin add uv - asdf install uv latest - asdf global uv latest - - uv sync --extra docs --frozen + - uv sync --group docs --frozen - uv run -m sphinx -T -b html -d docs/_build/doctrees -D language=en docs $READTHEDOCS_OUTPUT/html sphinx: From e5bd0e4c62e57b715b416cff7c974613ea3ce564 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 17:39:02 -0500 Subject: [PATCH 017/141] update docs structure once more --- docs/{source/conf.py => .conf.py} | 0 docs/.readthedocs.yaml | 4 ++-- docs/{source => }/advanced.md | 0 docs/{source => }/api.md | 0 docs/{source => }/contributing.md | 0 docs/{source => }/how_it_works.md | 0 docs/{source => }/index.md | 0 7 files changed, 2 insertions(+), 2 deletions(-) rename docs/{source/conf.py => .conf.py} (100%) rename docs/{source => }/advanced.md (100%) rename docs/{source => }/api.md (100%) rename docs/{source => }/contributing.md (100%) rename docs/{source => }/how_it_works.md (100%) rename docs/{source => }/index.md (100%) diff --git a/docs/source/conf.py b/docs/.conf.py similarity index 100% rename from docs/source/conf.py rename to docs/.conf.py diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index b61a7a34..3c52fa2c 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -9,7 
+9,7 @@ build: - asdf install uv latest - asdf global uv latest - uv sync --group docs --frozen - - uv run -m sphinx -T -b html -d docs/_build/doctrees -D language=en docs $READTHEDOCS_OUTPUT/html + - uv run -m sphinx -T -b html -d _build/doctrees -D language=en . $READTHEDOCS_OUTPUT/html sphinx: - configuration: docs/source/conf.py + configuration: docs/.conf.py diff --git a/docs/source/advanced.md b/docs/advanced.md similarity index 100% rename from docs/source/advanced.md rename to docs/advanced.md diff --git a/docs/source/api.md b/docs/api.md similarity index 100% rename from docs/source/api.md rename to docs/api.md diff --git a/docs/source/contributing.md b/docs/contributing.md similarity index 100% rename from docs/source/contributing.md rename to docs/contributing.md diff --git a/docs/source/how_it_works.md b/docs/how_it_works.md similarity index 100% rename from docs/source/how_it_works.md rename to docs/how_it_works.md diff --git a/docs/source/index.md b/docs/index.md similarity index 100% rename from docs/source/index.md rename to docs/index.md From f27ac01bc50cceaac220403531bcb45c3e0459b3 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 17:51:13 -0500 Subject: [PATCH 018/141] fix readthedocs conf.py --- docs/.readthedocs.yaml | 5 +---- docs/{.conf.py => conf.py} | 1 + 2 files changed, 2 insertions(+), 4 deletions(-) rename docs/{.conf.py => conf.py} (99%) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 3c52fa2c..a55fa974 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -9,7 +9,4 @@ build: - asdf install uv latest - asdf global uv latest - uv sync --group docs --frozen - - uv run -m sphinx -T -b html -d _build/doctrees -D language=en . 
$READTHEDOCS_OUTPUT/html - -sphinx: - configuration: docs/.conf.py + - uv run -m sphinx -T -j auto -b html -d docs/_build/doctrees docs $READTHEDOCS_OUTPUT/html diff --git a/docs/.conf.py b/docs/conf.py similarity index 99% rename from docs/.conf.py rename to docs/conf.py index 39aac572..9df271ea 100644 --- a/docs/.conf.py +++ b/docs/conf.py @@ -9,6 +9,7 @@ github_username = "apoorvkh" github_repository = "torchrunx" html_theme = "furo" +language = "en" extensions = [ "sphinx.ext.duration", From 675f05cdee3fc664d34e17a4ae745380c9a6c485 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 17:58:06 -0500 Subject: [PATCH 019/141] refactor again --- docs/.readthedocs.yaml | 2 +- docs/conf.py | 5 +---- docs/{ => source}/advanced.md | 0 docs/{ => source}/api.md | 0 docs/{ => source}/contributing.md | 0 docs/{ => source}/how_it_works.md | 0 docs/{ => source}/index.md | 0 7 files changed, 2 insertions(+), 5 deletions(-) rename docs/{ => source}/advanced.md (100%) rename docs/{ => source}/api.md (100%) rename docs/{ => source}/contributing.md (100%) rename docs/{ => source}/how_it_works.md (100%) rename docs/{ => source}/index.md (100%) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index a55fa974..a4e2e7d2 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -9,4 +9,4 @@ build: - asdf install uv latest - asdf global uv latest - uv sync --group docs --frozen - - uv run -m sphinx -T -j auto -b html -d docs/_build/doctrees docs $READTHEDOCS_OUTPUT/html + - uv run -m sphinx -T -j auto -b html -d docs/.doctrees -c docs docs/source $READTHEDOCS_OUTPUT/html diff --git a/docs/conf.py b/docs/conf.py index 9df271ea..00e5463f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,7 +1,4 @@ import os -import sys - -sys.path.insert(0, os.path.abspath("../../src")) # Configuration file for the Sphinx documentation builder. 
@@ -23,7 +20,7 @@ ] autodoc2_packages = [ - "../../src/torchrunx", + "../src/torchrunx", ] autodoc2_render_plugin = "myst" diff --git a/docs/advanced.md b/docs/source/advanced.md similarity index 100% rename from docs/advanced.md rename to docs/source/advanced.md diff --git a/docs/api.md b/docs/source/api.md similarity index 100% rename from docs/api.md rename to docs/source/api.md diff --git a/docs/contributing.md b/docs/source/contributing.md similarity index 100% rename from docs/contributing.md rename to docs/source/contributing.md diff --git a/docs/how_it_works.md b/docs/source/how_it_works.md similarity index 100% rename from docs/how_it_works.md rename to docs/source/how_it_works.md diff --git a/docs/index.md b/docs/source/index.md similarity index 100% rename from docs/index.md rename to docs/source/index.md From b3bdb3510b2fcc7cc50cacad35750b33d94e8a48 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 18:07:42 -0500 Subject: [PATCH 020/141] update docs deps --- docs/.readthedocs.yaml | 2 +- pyproject.toml | 2 +- uv.lock | 31 ++++++++++++++++--------------- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index a4e2e7d2..b873bcae 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -9,4 +9,4 @@ build: - asdf install uv latest - asdf global uv latest - uv sync --group docs --frozen - - uv run -m sphinx -T -j auto -b html -d docs/.doctrees -c docs docs/source $READTHEDOCS_OUTPUT/html + - uv run -m sphinx --builder html --jobs auto --doctree-dir docs/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html diff --git a/pyproject.toml b/pyproject.toml index e122b154..09bd0ae0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ dependencies = [ [dependency-groups] dev = ["ruff", "pyright", "pytest", "build", "twine"] dev-extras = ["submitit", "transformers"] -docs = ["sphinx==6.2.1", "furo", "myst-parser", "sphinx-autodoc2", 
"sphinx-toolbox"] +docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-autodoc2==0.5.0", "sphinx-toolbox==3.8.1"] [tool.uv] managed = true diff --git a/uv.lock b/uv.lock index fd5adfc4..4c6efd26 100644 --- a/uv.lock +++ b/uv.lock @@ -430,11 +430,11 @@ wheels = [ [[package]] name = "docutils" -version = "0.19" +version = "0.21.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/330ea8d383eb2ce973df34d1239b3b21e91cd8c865d21ff82902d952f91f/docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6", size = 2056383 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } wheels = [ - { url = "https://files.pythonhosted.org/packages/93/69/e391bd51bc08ed9141ecd899a0ddb61ab6465309f1eb470905c0c8868081/docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc", size = 570472 }, + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, ] [[package]] @@ -1636,7 +1636,7 @@ wheels = [ [[package]] name = "sphinx" -version = "6.2.1" +version = "7.4.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "alabaster" }, @@ -1656,10 +1656,11 @@ dependencies = [ { name = "sphinxcontrib-jsmath" }, { name = "sphinxcontrib-qthelp" }, { name = "sphinxcontrib-serializinghtml" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0f/6d/392defcc95ca48daf62aecb89550143e97a4651275e62a3d7755efe35a3a/Sphinx-6.2.1.tar.gz", hash = 
"sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b", size = 6681092 } +sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911 } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/d8/45ba6097c39ba44d9f0e1462fb232e13ca4ddb5aea93a385dcfa964687da/sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912", size = 3024615 }, + { url = "https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624 }, ] [[package]] @@ -1716,16 +1717,16 @@ wheels = [ [[package]] name = "sphinx-prompt" -version = "1.6.0" +version = "1.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docutils" }, { name = "pygments" }, { name = "sphinx" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/fb/a252124876be4e2eb2a25536c76e817e10d96ecd47b971016b960cb8d726/sphinx_prompt-1.6.0.tar.gz", hash = "sha256:0ae92cdae0962896827e11880d3e2317b436491663e83a8289a9c9c1b7d758ae", size = 5502 } +sdist = { url = "https://files.pythonhosted.org/packages/e7/fb/7a07b8df1ca2418147a6b13e3f6b445071f2565198b45efa631d0d6ef0cd/sphinx_prompt-1.8.0.tar.gz", hash = "sha256:47482f86fcec29662fdfd23e7c04ef03582714195d01f5d565403320084372ed", size = 5121 } wheels = [ - { url = "https://files.pythonhosted.org/packages/11/25/a9252d0ccf84247b602a47cbe1ec6669647971841594ee8a1ed38b7b1c38/sphinx_prompt-1.6.0-py3-none-any.whl", hash = "sha256:a118fc1519f367dfffd73fbc34e1d905e38929dec6a3971518a331bf0689b0df", size = 5225 }, + { url = 
"https://files.pythonhosted.org/packages/39/49/f890a2668b7cbf375f5528b549c8d36dd2e801b0fbb7b2b5ef65663ecb6c/sphinx_prompt-1.8.0-py3-none-any.whl", hash = "sha256:369ecc633f0711886f9b3a078c83264245be1adf46abeeb9b88b5519e4b51007", size = 7298 }, ] [[package]] @@ -2059,11 +2060,11 @@ dev-extras = [ { name = "transformers" }, ] docs = [ - { name = "furo" }, - { name = "myst-parser" }, - { name = "sphinx", specifier = "==6.2.1" }, - { name = "sphinx-autodoc2" }, - { name = "sphinx-toolbox" }, + { name = "furo", specifier = "==2024.8.6" }, + { name = "myst-parser", specifier = "==3.0.1" }, + { name = "sphinx", specifier = "==7.4.7" }, + { name = "sphinx-autodoc2", specifier = "==0.5.0" }, + { name = "sphinx-toolbox", specifier = "==3.8.1" }, ] [[package]] @@ -2104,7 +2105,7 @@ name = "triton" version = "3.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "filelock" }, + { name = "filelock", marker = "python_full_version < '3.13'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/98/29/69aa56dc0b2eb2602b553881e34243475ea2afd9699be042316842788ff5/triton-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8", size = 209460013 }, From d28aa4605823019d317d2b27da4807bfa09f0ff3 Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Mon, 2 Dec 2024 15:29:46 -0800 Subject: [PATCH 021/141] Rename .readthedocs.yaml to readthedocs.yaml --- docs/{.readthedocs.yaml => readthedocs.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/{.readthedocs.yaml => readthedocs.yaml} (100%) diff --git a/docs/.readthedocs.yaml b/docs/readthedocs.yaml similarity index 100% rename from docs/.readthedocs.yaml rename to docs/readthedocs.yaml From 1825c077922564dea16291e4970885437d061657 Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Mon, 2 Dec 2024 15:30:45 -0800 Subject: [PATCH 022/141] Rename readthedocs.yaml to .readthedocs.yaml --- 
docs/{readthedocs.yaml => .readthedocs.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/{readthedocs.yaml => .readthedocs.yaml} (100%) diff --git a/docs/readthedocs.yaml b/docs/.readthedocs.yaml similarity index 100% rename from docs/readthedocs.yaml rename to docs/.readthedocs.yaml From e5e05a9428aee7d48a2b7b565c3828daa7a647df Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 23:11:55 -0500 Subject: [PATCH 023/141] update docs config --- docs/.readthedocs.yaml | 3 +-- docs/conf.py | 16 +++++----------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index b873bcae..1a50e3cf 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -8,5 +8,4 @@ build: - asdf plugin add uv - asdf install uv latest - asdf global uv latest - - uv sync --group docs --frozen - - uv run -m sphinx --builder html --jobs auto --doctree-dir docs/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html + - uv run --only-group docs python -m sphinx --builder html --jobs auto --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html diff --git a/docs/conf.py b/docs/conf.py index 00e5463f..a6d712e1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,23 +9,17 @@ language = "en" extensions = [ - "sphinx.ext.duration", - "sphinx.ext.intersphinx", "autodoc2", - "myst_parser", + "myst_parser", # support markdown + "sphinx.ext.intersphinx", # link to external docs + "sphinx.ext.napoleon", # for google style docstrings + "sphinx.ext.linkcode", # link to github source "sphinx_toolbox.sidebar_links", "sphinx_toolbox.github", - "sphinx.ext.napoleon", - "sphinx.ext.linkcode", ] -autodoc2_packages = [ - "../src/torchrunx", -] -autodoc2_render_plugin = "myst" - maximum_signature_line_length = 100 - +autodoc2_render_plugin = "myst" intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), } From 
90ad6f67ae7cd6fa25913f84a8dc484e7a44fa85 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 2 Dec 2024 23:20:02 -0500 Subject: [PATCH 024/141] misc --- docs/conf.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index a6d712e1..247f114e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,6 +1,4 @@ -import os - -# Configuration file for the Sphinx documentation builder. +"""Configuration file for the Sphinx documentation builder.""" project = "torchrunx" github_username = "apoorvkh" From 2ab01274096a9e6b89519e6ef77921ee3af2f9e0 Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Mon, 2 Dec 2024 23:54:42 -0800 Subject: [PATCH 025/141] Add autodoc2_packages back --- docs/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/conf.py b/docs/conf.py index 247f114e..60deb063 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -17,6 +17,7 @@ ] maximum_signature_line_length = 100 +autodoc2_packages = ["../src/torchrunx"] autodoc2_render_plugin = "myst" intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), From b3befadc8890651c27bff32116a995b047068cca Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Tue, 3 Dec 2024 00:01:16 -0800 Subject: [PATCH 026/141] Update conf.py --- docs/conf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 60deb063..eaedb8c2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,6 @@ """Configuration file for the Sphinx documentation builder.""" +import sys +sys.path.insert(0, os.path.abspath("../../src")) project = "torchrunx" github_username = "apoorvkh" @@ -17,7 +19,7 @@ ] maximum_signature_line_length = 100 -autodoc2_packages = ["../src/torchrunx"] +autodoc2_packages = ["../../src/torchrunx"] autodoc2_render_plugin = "myst" intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), From 0e220a4797ff42c5c2e93422a4dfbc24d8849a8c Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Tue, 3 Dec 2024 
00:02:53 -0800 Subject: [PATCH 027/141] Update conf.py --- docs/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/conf.py b/docs/conf.py index eaedb8c2..6acb4736 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,6 @@ """Configuration file for the Sphinx documentation builder.""" import sys +import os sys.path.insert(0, os.path.abspath("../../src")) project = "torchrunx" From 1f4887857862676c15cb6dcf5963700f5ccf844f Mon Sep 17 00:00:00 2001 From: Apoorv Khandelwal Date: Tue, 3 Dec 2024 00:05:53 -0800 Subject: [PATCH 028/141] Update conf.py --- docs/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 6acb4736..6eb89fe7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,7 +20,6 @@ ] maximum_signature_line_length = 100 -autodoc2_packages = ["../../src/torchrunx"] autodoc2_render_plugin = "myst" intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), From 8183773c1bdebed8503644afa3e6e11f61ebef8a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Tue, 3 Dec 2024 17:27:08 -0500 Subject: [PATCH 029/141] fixes for sphinx build --- docs/.readthedocs.yaml | 2 +- docs/conf.py | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 1a50e3cf..4aa30e7d 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -8,4 +8,4 @@ build: - asdf plugin add uv - asdf install uv latest - asdf global uv latest - - uv run --only-group docs python -m sphinx --builder html --jobs auto --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html + - uv run --group docs python -m sphinx --builder html --jobs auto --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html diff --git a/docs/conf.py b/docs/conf.py index 6eb89fe7..cc51e831 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,7 +1,4 @@ """Configuration file for the Sphinx documentation 
builder.""" -import sys -import os -sys.path.insert(0, os.path.abspath("../../src")) project = "torchrunx" github_username = "apoorvkh" @@ -22,12 +19,14 @@ maximum_signature_line_length = 100 autodoc2_render_plugin = "myst" intersphinx_mapping = { - "python": ("https://docs.python.org/3/", None), + "python": ("https://docs.python.org/3.9", None), + "fabric": ("https://docs.fabfile.org/en/stable", None), + 'torch': ('https://pytorch.org/docs/stable', None), + "numpy": ("https://numpy.org/doc/stable", None), } -intersphinx_disabled_domains = ["std"] - -## Link code to Github source +## sphinx.ext.linkcode configuration +# Link code to Github source # From: https://github.com/scikit-learn/scikit-learn/blob/main/doc/sphinxext/github_link.py import inspect @@ -86,4 +85,4 @@ def linkcode_resolve(domain, info): lineno = "" return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) -## End of "link code to Github source" +## End of "sphinx.ext.linkcode configuration" From cf806f4751f4eab4c925faf2fdb370c20abeb225 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 29 Dec 2024 00:12:59 -0500 Subject: [PATCH 030/141] updates to README --- README.md | 100 +++++++++++++++++++++++++++++------------------------- 1 file changed, 54 insertions(+), 46 deletions(-) diff --git a/README.md b/README.md index 23423e12..f9a3e296 100644 --- a/README.md +++ b/README.md @@ -15,39 +15,36 @@ By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.co **`torchrunx`** is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate launch`, `deepspeed`, etc). -Simply put, you can distribute PyTorch functions from Python like: - -```python -def train(num_steps: int): ... 
# implemented below +```bash +pip install torchrunx +``` -import torchrunx as trx +Requires: Linux (+ SSH & shared filesystem if using multiple machines) -# Run train(num_steps=10) on 2 machines with 2 GPUs each +**Example: Training a model on 2 machines with 2 GPUs each** -result = trx.launch( - func=train, - func_kwargs=dict(num_steps=10), - hostnames=["localhost", "other_node"], - workers_per_host=2 -) +```python +import os +import torch +import torch.nn as nn -trained_model = result.rank(0) -torch.save(trained_model.state_dict(), "model.pth") +def train(model: nn.Module, num_steps: int) -> nn.Module | None: + # ... + rank = int(os.environ['RANK']) + if rank == 0: + return model.cpu() ```
Training function (expand) ```python -import os -import torch - -def train(num_steps: int = 5): +def train(model: nn.Module, num_steps: int = 5) -> nn.Module | None: rank = int(os.environ['RANK']) local_rank = int(os.environ['LOCAL_RANK']) - model = torch.nn.Linear(10, 10).to(local_rank) - ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) + model.to(local_rank) + ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) optimizer = torch.optim.AdamW(ddp_model.parameters()) for step in range(10): @@ -58,54 +55,65 @@ def train(num_steps: int = 5): optimizer.step() if rank == 0: - return model + return model.cpu() ```
-**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** - -## Installation +```python +import torch.nn as nn +import torchrunx + +results = torchrunx.launch( + func = train, + func_kwargs = dict( + model = nn.Linear(10, 10), + num_steps = 10 + ), + hostnames = ["localhost", "second_machine"], + workers_per_host = 2 +) -```bash -pip install torchrunx +trained_model: nn.Module = results.rank(0) +torch.save(trained_model.state_dict(), "model.pth") ``` -**Requires:** Linux (+ SSH & shared filesystems if using multiple machines) - -## Why? +**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** -This library uniquely offers: +## `torchrunx` uniquely offers -1. **An automatic launcher that just works for everyone** 🚀 +1. **An automatic launcher that "just works" for everyone** 🚀 -No system-specific dependencies and orchestration for *automatic* multi-node distribution. `torchrunx` is an SSH-based, pure-Python library that is universally easy to install. +> `torchrunx` is an SSH-based, pure-Python library that is universally easy to install.
+> No system-specific dependencies and orchestration for *automatic* multi-node distribution. 2. **Conventional CLI commands** 🖥️ -Run familiar commands, like `python my_script.py ...`, and customize arguments as you wish. - -In contrast to launchers that override the `python` executable in a cumbersome way (e.g. `torchrun --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=100.43.331.111 --master_port=1234 my_script.py ...`). +> Run familiar commands, like `python my_script.py ...`, and customize arguments as you wish. +> +> Other launchers override `python` in a cumbersome way: e.g. `torchrun --nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=100.43.331.111 --master_port=1234 my_script.py ...`. 3. **Support for more complex workflows in a single script** 🎛️ -Your workflow may have independent steps that need different parallelizations (e.g. training on 8 GPUs, testing on 1 GPU; comparing throughput on 4, then 8 GPUs; and so forth). CLI-based launchers naively parallelize the entire script for exactly *N* GPUs. In contrast, our library treats these steps in a modular way and permits *degrees* of parallelism in a single script. - -We clean memory leaks (which are unfortunately common in PyTorch) as we go, so previous steps won't crash or adversely affect future steps. +> Your workflow may have independent steps that need different parallelizations (e.g. training on 8 GPUs, testing on 1 GPU; comparing throughput on 4, then 8 GPUs; and so forth). CLI-based launchers naively parallelize the entire script for exactly *N* GPUs. In contrast, our library treats these steps in a modular way and permits *degrees* of parallelism in a single script. +> +> +> We clean memory leaks as we go, so previous steps won't crash or adversely affect future steps. 4. **Better handling of system failures. No more zombies!** 🧟 -With `torchrun`, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. 
due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for at least 10 minutes (the NCCL timeout) or become perpetual zombies. - -`torchrunx` decouples "launcher" and "worker" processes. If the system kills a worker, our launcher immediately raises a `WorkerFailure` exception, which users can handle as they wish. We always clean up all nodes, so no more zombies! +> With `torchrun`, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for at least 10 minutes (the NCCL timeout) or become perpetual zombies. +> +> +> `torchrunx` decouples "launcher" and "worker" processes. If the system kills a worker, our launcher immediately raises a `WorkerFailure` exception, which users can handle as they wish. We always clean up all nodes, so no more zombies! 5. **Bonus features** 🎁 -- Fine-grained, custom handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! -- No need to manually set up a [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) -- We automatically detect and infer settings from SLURM environments. -- Start multi-node training from Python notebooks! +> - Fine-grained, custom handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! +> - No need to manually set up a [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) +> - Automatic detection of SLURM environments. +> - Start multi-node training from Python notebooks! 
-On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more! +**On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more!** ## Examples with other libraries From 90a4ce9e0b1af40638fec4861e3ca3a4ef58f0f7 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 29 Dec 2024 00:19:24 -0500 Subject: [PATCH 031/141] switched `func_args` type to `tuple` --- src/torchrunx/launcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 4fa24e1a..e2d102ca 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -62,7 +62,7 @@ class Launcher: def run( # noqa: C901, PLR0912 self, func: Callable, - func_args: tuple[Any] | None = None, + func_args: tuple | None = None, func_kwargs: dict[str, Any] | None = None, handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto", ) -> LaunchResult: @@ -195,7 +195,7 @@ def run( # noqa: C901, PLR0912 def launch( func: Callable, - func_args: tuple[Any] | None = None, + func_args: tuple | None = None, func_kwargs: dict[str, Any] | None = None, hostnames: list[str] | Literal["auto", "slurm"] = "auto", workers_per_host: int | list[int] | Literal["auto", "slurm"] = "auto", From 8935e190eea7ebad7e9ce8018781cf921e2160e8 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 24 Jan 2025 15:33:07 -0500 Subject: [PATCH 032/141] added examples page --- README.md | 42 +++-------------------------------------- docs/source/examples.md | 37 ++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 39 deletions(-) create mode 100644 docs/source/examples.md diff --git a/README.md b/README.md index f9a3e296..a11673fa 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,6 @@ def train(model: nn.Module, 
num_steps: int = 5) -> nn.Module | None:
```python -import torch.nn as nn import torchrunx results = torchrunx.launch( @@ -74,11 +73,13 @@ results = torchrunx.launch( ) trained_model: nn.Module = results.rank(0) -torch.save(trained_model.state_dict(), "model.pth") +torch.save(trained_model.state_dict(), "output/model.pth") ``` **Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** +**See [Examples](https://torchrunx.readthedocs.io/stable/examples.html) for usage with several deep learning libraries (HF Trainer, PyTorch Lightning, etc).** + ## `torchrunx` uniquely offers 1. **An automatic launcher that "just works" for everyone** 🚀 @@ -114,40 +115,3 @@ torch.save(trained_model.state_dict(), "model.pth") > - Start multi-node training from Python notebooks! **On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more!** - -## Examples with other libraries - -
- Accelerate - - ```python - ``` -
- -
- HF Trainer - - ```python - ``` -
- -
- Deepspeed - - ```python - ``` -
- -
- PyTorch Lightning - - ```python - ``` -
- -
- MosaicML Composer - - ```python - ``` -
diff --git a/docs/source/examples.md b/docs/source/examples.md new file mode 100644 index 00000000..4f072f20 --- /dev/null +++ b/docs/source/examples.md @@ -0,0 +1,37 @@ + +## Using `torchrunx` with deep learning libraries + +
+ Accelerate + + ```python + ``` +
+ +
+ HF Trainer + + ```python + ``` +
+ +
+ Deepspeed + + ```python + ``` +
+ +
+ PyTorch Lightning + + ```python + ``` +
+ +
+ MosaicML Composer + + ```python + ``` +
From 40548d413f0b451846577fc621b6abe857aebee0 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 24 Jan 2025 16:20:41 -0500 Subject: [PATCH 033/141] docs: added HF trainer example; temp disabled github plugin --- docs/conf.py | 2 +- docs/source/examples.md | 66 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index cc51e831..e258b667 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,7 +13,7 @@ "sphinx.ext.napoleon", # for google style docstrings "sphinx.ext.linkcode", # link to github source "sphinx_toolbox.sidebar_links", - "sphinx_toolbox.github", + # "sphinx_toolbox.github", ] maximum_signature_line_length = 100 diff --git a/docs/source/examples.md b/docs/source/examples.md index 4f072f20..07729f49 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -12,6 +12,72 @@ HF Trainer ```python + import torch + from datasets import load_dataset + from torch import nn + from torch.utils.data import Dataset + from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments + + + class GPT2CausalLMDataset(Dataset): + def __init__(self, text_dataset): + self.dataset = text_dataset + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + self.tokenizer.pad_token = self.tokenizer.eos_token + self.max_length = 1024 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + encoded = self.tokenizer( + self.dataset[idx]["text"], + max_length=self.max_length, + truncation=True, + padding="max_length", + return_tensors="pt", + ) + + input_ids = encoded.input_ids.squeeze() + labels = input_ids.clone() + + return {"input_ids": input_ids, "labels": labels} + + + def train(): + model = AutoModelForCausalLM.from_pretrained("gpt2") + wikitext_train = load_dataset( + "Salesforce/wikitext", name="wikitext-2-v1", split="train" + ) + train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + + trainer = Trainer( + model=model, + 
args=TrainingArguments( + output_dir="output", + per_device_train_batch_size=16, + max_steps=10, + ), + train_dataset=train_dataset, + ) + + trainer.train() + + return model + ``` + + ```python + import torchrunx + + if __name__ == "__main__": + results = torchrunx.launch( + func=train, + hostnames=["localhost"], + workers_per_host=1, + ) + + trained_model: nn.Module = results.rank(0) + torch.save(trained_model.state_dict(), "output/model.pth") ```
From dc663b39831a89e6a83585271c7b16d99e5de1c7 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 24 Jan 2025 16:39:10 -0500 Subject: [PATCH 034/141] enable sphinx github plugin --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index e258b667..cc51e831 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,7 +13,7 @@ "sphinx.ext.napoleon", # for google style docstrings "sphinx.ext.linkcode", # link to github source "sphinx_toolbox.sidebar_links", - # "sphinx_toolbox.github", + "sphinx_toolbox.github", ] maximum_signature_line_length = 100 From 83b8d325c18a9f9fa2589061e0dda6a6a7914c92 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 24 Jan 2025 23:06:16 -0500 Subject: [PATCH 035/141] sphinx --jobs 1 --- docs/.readthedocs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 4aa30e7d..061ecd79 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -8,4 +8,4 @@ build: - asdf plugin add uv - asdf install uv latest - asdf global uv latest - - uv run --group docs python -m sphinx --builder html --jobs auto --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html + - uv run --group docs python -m sphinx --builder html --jobs 1 --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html From 6a2ff8937f437fc1e2cfae0857501118696d0f03 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 24 Jan 2025 23:18:25 -0500 Subject: [PATCH 036/141] docs: "examples" in sidebar --- docs/source/index.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/index.md b/docs/source/index.md index 00e2b62c..a49545d6 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -9,6 +9,7 @@ api advanced +examples how_it_works contributing ``` From ae10612254a932b52f5cfa27e8898c3d0a01968e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 25 Jan 2025 
21:12:55 -0500 Subject: [PATCH 037/141] updated formatting --- README.md | 11 +++- docs/source/examples.md | 143 +++++++++++++++++----------------------- 2 files changed, 69 insertions(+), 85 deletions(-) diff --git a/README.md b/README.md index a11673fa..72de6dac 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.co --- -**`torchrunx`** is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers (`torchrun`, `accelerate launch`, `deepspeed`, etc). +**`torchrunx`** is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers, like `torchrun`, `accelerate launch`, and `deepspeed`. ```bash pip install torchrunx @@ -76,9 +76,14 @@ trained_model: nn.Module = results.rank(0) torch.save(trained_model.state_dict(), "output/model.pth") ``` -**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** +**See [Examples](https://torchrunx.readthedocs.io/stable/examples.html) for full examples (training GPT-2) with several deep learning libraries:** + - Accelerate + - HF Trainer + - DeepSpeed + - PyTorch Lightning + - MosaicML Composer -**See [Examples](https://torchrunx.readthedocs.io/stable/examples.html) for usage with several deep learning libraries (HF Trainer, PyTorch Lightning, etc).** +**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** ## `torchrunx` uniquely offers diff --git a/docs/source/examples.md b/docs/source/examples.md index 07729f49..68a88eab 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -1,103 +1,82 @@ +# Examples -## Using `torchrunx` with deep learning libraries +## Using `torchrunx` with other deep learning libraries -
- Accelerate +## Accelerate - ```python - ``` -
+## HF Trainer -
- HF Trainer +```python +import torch +from datasets import load_dataset +from torch import nn +from torch.utils.data import Dataset +from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments - ```python - import torch - from datasets import load_dataset - from torch import nn - from torch.utils.data import Dataset - from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments +class GPT2CausalLMDataset(Dataset): + def __init__(self, text_dataset): + self.dataset = text_dataset + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + self.tokenizer.pad_token = self.tokenizer.eos_token + self.max_length = 1024 - class GPT2CausalLMDataset(Dataset): - def __init__(self, text_dataset): - self.dataset = text_dataset - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - self.tokenizer.pad_token = self.tokenizer.eos_token - self.max_length = 1024 + def __len__(self): + return len(self.dataset) - def __len__(self): - return len(self.dataset) + def __getitem__(self, idx): + encoded = self.tokenizer( + self.dataset[idx]["text"], + max_length=self.max_length, + truncation=True, + padding="max_length", + return_tensors="pt", + ) - def __getitem__(self, idx): - encoded = self.tokenizer( - self.dataset[idx]["text"], - max_length=self.max_length, - truncation=True, - padding="max_length", - return_tensors="pt", - ) + input_ids = encoded.input_ids.squeeze() + labels = input_ids.clone() - input_ids = encoded.input_ids.squeeze() - labels = input_ids.clone() + return {"input_ids": input_ids, "labels": labels} - return {"input_ids": input_ids, "labels": labels} +def train(): + model = AutoModelForCausalLM.from_pretrained("gpt2") + wikitext_train = load_dataset( + "Salesforce/wikitext", name="wikitext-2-v1", split="train" + ) + train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) - def train(): - model = AutoModelForCausalLM.from_pretrained("gpt2") - wikitext_train = load_dataset( - "Salesforce/wikitext", 
name="wikitext-2-v1", split="train" - ) - train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + trainer = Trainer( + model=model, + args=TrainingArguments( + output_dir="output", + per_device_train_batch_size=16, + max_steps=10, + ), + train_dataset=train_dataset, + ) - trainer = Trainer( - model=model, - args=TrainingArguments( - output_dir="output", - per_device_train_batch_size=16, - max_steps=10, - ), - train_dataset=train_dataset, - ) + trainer.train() - trainer.train() + return model +``` - return model - ``` +```python +import torchrunx - ```python - import torchrunx +if __name__ == "__main__": + results = torchrunx.launch( + func=train, + hostnames=["localhost"], + workers_per_host=1, + ) - if __name__ == "__main__": - results = torchrunx.launch( - func=train, - hostnames=["localhost"], - workers_per_host=1, - ) + trained_model: nn.Module = results.rank(0) + torch.save(trained_model.state_dict(), "output/model.pth") +``` - trained_model: nn.Module = results.rank(0) - torch.save(trained_model.state_dict(), "output/model.pth") - ``` -
+## DeepSpeed -
- Deepspeed +## PyTorch Lightning - ```python - ``` -
- -
- PyTorch Lightning - - ```python - ``` -
- -
- MosaicML Composer - - ```python - ``` -
+## MosaicML Composer From 726929a4e41965d04e4cc1dcff0f649c452c066e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 25 Jan 2025 23:08:51 -0500 Subject: [PATCH 038/141] updates to docs --- docs/source/advanced.md | 11 +---------- docs/source/api.md | 13 +++++++++++++ docs/source/examples.md | 12 +++++++----- 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/docs/source/advanced.md b/docs/source/advanced.md index 7a79d0e0..a0eedd84 100644 --- a/docs/source/advanced.md +++ b/docs/source/advanced.md @@ -25,16 +25,7 @@ print(f'Accuracy: {accuracy}') {mod}`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. -## Launcher class - -We provide the {mod}`torchrunx.Launcher` class as an alias to {mod}`torchrunx.launch`. - -```{eval-rst} -.. autoclass:: torchrunx.Launcher - :members: -``` - -### CLI integration +## CLI integration We can use {mod}`torchrunx.Launcher` to populate arguments from the CLI (e.g. with [tyro](https://brentyi.github.io/tyro/)): diff --git a/docs/source/api.md b/docs/source/api.md index 1025621d..169602d3 100644 --- a/docs/source/api.md +++ b/docs/source/api.md @@ -1,14 +1,27 @@ # API +## Launching functions + ```{eval-rst} .. autofunction:: torchrunx.launch(func: Callable, ...) ``` +We provide the {mod}`torchrunx.Launcher` class as an alias to {mod}`torchrunx.launch`. + +```{eval-rst} +.. autoclass:: torchrunx.Launcher + :members: +``` + +## Results + ```{eval-rst} .. autoclass:: torchrunx.LaunchResult :members: ``` +## Exceptions + ```{eval-rst} .. autoclass:: torchrunx.AgentFailedError ``` diff --git a/docs/source/examples.md b/docs/source/examples.md index 68a88eab..2563a545 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -2,9 +2,11 @@ ## Using `torchrunx` with other deep learning libraries -## Accelerate +We will show examples of how to use `torchrunx` to train a GPT-2 (small) with text data from wikitext. 
-## HF Trainer +### Accelerate + +### HF Trainer ```python import torch @@ -75,8 +77,8 @@ if __name__ == "__main__": torch.save(trained_model.state_dict(), "output/model.pth") ``` -## DeepSpeed +### DeepSpeed -## PyTorch Lightning +### PyTorch Lightning -## MosaicML Composer +### MosaicML Composer From 645745eece8a8da91b200a83318ad6527a310a68 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 26 Jan 2025 14:21:10 -0500 Subject: [PATCH 039/141] docs: build-time copy of README in docs/source --- .gitignore | 1 + README.md | 35 +++++++++++++++++------------------ docs/.readthedocs.yaml | 2 +- docs/conf.py | 11 +++++++++++ docs/source/index.md | 2 +- 5 files changed, 31 insertions(+), 20 deletions(-) diff --git a/.gitignore b/.gitignore index af2731dd..2566ec82 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +docs/source/README.md torchrunx_logs/ .pixi/ .ruff_cache/ diff --git a/README.md b/README.md index 72de6dac..21a5a14c 100644 --- a/README.md +++ b/README.md @@ -7,13 +7,13 @@ [![Docs](https://readthedocs.org/projects/torchrunx/badge/?version=stable)](https://torchrunx.readthedocs.io) [![GitHub License](https://img.shields.io/github/license/apoorvkh/torchrunx)](https://github.com/apoorvkh/torchrunx/blob/main/LICENSE) -By [Apoorv Khandelwal](http://apoorvkh.com) and [Peter Curtin](https://github.com/pmcurtin) +By [Apoorv Khandelwal](https://apoorvkh.com) and [Peter Curtin](https://github.com/pmcurtin) **The easiest way to run PyTorch on multiple GPUs or machines.** --- -**`torchrunx`** is a more convenient, *functional* replacement for CLI-based distributed PyTorch launchers, like `torchrun`, `accelerate launch`, and `deepspeed`. +**`torchrunx`** is a *functional* utility for distributing PyTorch code across devices. This is a [more convenient, robust, and featureful](#torchrunx-uniquely-offers) alternative to CLI-based launchers, like `torchrun`, `accelerate launch`, and `deepspeed`. 
```bash pip install torchrunx @@ -21,24 +21,17 @@ pip install torchrunx Requires: Linux (+ SSH & shared filesystem if using multiple machines) -**Example: Training a model on 2 machines with 2 GPUs each** +--- + +**Vanilla Example: Training a model on 2 machines with 2 GPUs each** + +Dummy distributed training function: ```python import os import torch import torch.nn as nn -def train(model: nn.Module, num_steps: int) -> nn.Module | None: - # ... - rank = int(os.environ['RANK']) - if rank == 0: - return model.cpu() -``` - -
- Training function (expand) - -```python def train(model: nn.Module, num_steps: int = 5) -> nn.Module | None: rank = int(os.environ['RANK']) local_rank = int(os.environ['LOCAL_RANK']) @@ -49,15 +42,19 @@ def train(model: nn.Module, num_steps: int = 5) -> nn.Module | None: for step in range(10): optimizer.zero_grad() - outputs = ddp_model(torch.randn(5, 10)) + + inputs = torch.randn(5, 10).to(local_rank) labels = torch.randn(5, 10).to(local_rank) + outputs = ddp_model(inputs) + torch.nn.functional.mse_loss(outputs, labels).backward() optimizer.step() if rank == 0: return model.cpu() ``` -
+ +Launching training with `torchrunx`: ```python import torchrunx @@ -76,14 +73,16 @@ trained_model: nn.Module = results.rank(0) torch.save(trained_model.state_dict(), "output/model.pth") ``` -**See [Examples](https://torchrunx.readthedocs.io/stable/examples.html) for full examples (training GPT-2) with several deep learning libraries:** +**See [more examples](./docs/source/examples.md) that showcase training GPT-2 using the following deep learning libraries:** - Accelerate - HF Trainer - DeepSpeed - PyTorch Lightning - MosaicML Composer -**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** +**Refer to our [API](./docs/source/api.md) and [Advanced Usage Guide](./docs/source/advanced.md) for many more capabilities!** + +--- ## `torchrunx` uniquely offers diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 061ecd79..d819dfbd 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -8,4 +8,4 @@ build: - asdf plugin add uv - asdf install uv latest - asdf global uv latest - - uv run --group docs python -m sphinx --builder html --jobs 1 --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html + - uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html diff --git a/docs/conf.py b/docs/conf.py index cc51e831..5ff6a7c7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -25,6 +25,17 @@ "numpy": ("https://numpy.org/doc/stable", None), } +## Copy README.md to docs/source/ +# Adjust relative paths to "docs/source/" files + +with open("../README.md", "r") as f: + readme_str = f.read().replace("docs/source/", "") + +with open("source/README.md", "w") as f: + f.write(readme_str) + +## End of "Copy README.md to docs/source/" + ## sphinx.ext.linkcode configuration # Link code to 
Github source # From: https://github.com/scikit-learn/scikit-learn/blob/main/doc/sphinxext/github_link.py diff --git a/docs/source/index.md b/docs/source/index.md index a49545d6..4cd7edf8 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -1,5 +1,5 @@ ```{eval-rst} -.. include:: ../../README.md +.. include:: ./README.md :parser: myst_parser.sphinx_ ``` From 35ec7d0fc147d52bb25f20e1781e36e1003be84d Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 26 Jan 2025 14:26:11 -0500 Subject: [PATCH 040/141] docs: migrated readme docs links to https://stable --- README.md | 4 ++-- docs/conf.py | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 21a5a14c..22d864c3 100644 --- a/README.md +++ b/README.md @@ -73,14 +73,14 @@ trained_model: nn.Module = results.rank(0) torch.save(trained_model.state_dict(), "output/model.pth") ``` -**See [more examples](./docs/source/examples.md) that showcase training GPT-2 using the following deep learning libraries:** +**See [more examples](https://torchrunx.readthedocs.io/stable/examples.html) that showcase training GPT-2 using the following deep learning libraries:** - Accelerate - HF Trainer - DeepSpeed - PyTorch Lightning - MosaicML Composer -**Refer to our [API](./docs/source/api.md) and [Advanced Usage Guide](./docs/source/advanced.md) for many more capabilities!** +**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** --- diff --git a/docs/conf.py b/docs/conf.py index 5ff6a7c7..19663d50 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -26,14 +26,11 @@ } ## Copy README.md to docs/source/ -# Adjust relative paths to "docs/source/" files - with open("../README.md", "r") as f: - readme_str = f.read().replace("docs/source/", "") + readme_str = f.read().replace("https://torchrunx.readthedocs.io/stable/", "./") with open("source/README.md", "w") as f: 
f.write(readme_str) - ## End of "Copy README.md to docs/source/" ## sphinx.ext.linkcode configuration From 8c38bbd58ccdc1ac084998583fbd362f732f096c Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 26 Jan 2025 14:39:38 -0500 Subject: [PATCH 041/141] revert link replace --- docs/conf.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 19663d50..06e89f6c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -26,11 +26,8 @@ } ## Copy README.md to docs/source/ -with open("../README.md", "r") as f: - readme_str = f.read().replace("https://torchrunx.readthedocs.io/stable/", "./") - -with open("source/README.md", "w") as f: - f.write(readme_str) +import shutil +shutil.copyfile("../README.md", "source/README.md") ## End of "Copy README.md to docs/source/" ## sphinx.ext.linkcode configuration From eaa7be23075afd8a313abb8e0ec32f46f0edeb64 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 26 Jan 2025 23:44:53 -0500 Subject: [PATCH 042/141] update readme --- README.md | 4 ++-- docs/source/examples.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 22d864c3..0a956e09 100644 --- a/README.md +++ b/README.md @@ -73,9 +73,9 @@ trained_model: nn.Module = results.rank(0) torch.save(trained_model.state_dict(), "output/model.pth") ``` -**See [more examples](https://torchrunx.readthedocs.io/stable/examples.html) that showcase training GPT-2 using the following deep learning libraries:** +**See [training GPT-2 on WikiText](https://torchrunx.readthedocs.io/stable/examples.html#training-gpt-2-on-wikitext) for more examples using the following deep learning libraries:** - Accelerate - - HF Trainer + - HF Transformers - DeepSpeed - PyTorch Lightning - MosaicML Composer diff --git a/docs/source/examples.md b/docs/source/examples.md index 2563a545..4d2338f6 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -1,8 +1,8 @@ # Examples -## Using `torchrunx` with other deep 
learning libraries +## Training GPT-2 on WikiText -We will show examples of how to use `torchrunx` to train a GPT-2 (small) with text data from wikitext. +We will show examples of how to use `torchrunx` alongside several deep learning libraries to train a GPT-2 (small) model with text data from WikiText. ### Accelerate From be485e900503509f69cdbe0a3260f05e7c1dab69 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Wed, 29 Jan 2025 11:56:24 -0500 Subject: [PATCH 043/141] accelerator example --- acc.py | 79 +++++++++++++++++++++++++++++++++++++++ docs/source/examples.md | 83 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100644 acc.py diff --git a/acc.py b/acc.py new file mode 100644 index 00000000..3e3f312f --- /dev/null +++ b/acc.py @@ -0,0 +1,79 @@ +from pathlib import Path + +import torch +from accelerate import Accelerator +from datasets import load_dataset +from torch import nn +from torch.utils.data import Dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + +import torchrunx + + +class GPT2CausalLMDataset(Dataset): + def __init__(self, text_dataset): + self.dataset = text_dataset + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + self.tokenizer.pad_token = self.tokenizer.eos_token + self.max_length = 1024 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + encoded = self.tokenizer( + self.dataset[idx]["text"], + max_length=self.max_length, + truncation=True, + padding="max_length", + return_tensors="pt", + ) + + input_ids = encoded.input_ids.squeeze() + attention_mask = encoded.attention_mask.squeeze() + labels = input_ids.clone() + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "labels": labels, + } + + +def train(): + accelerator = Accelerator() + + model = AutoModelForCausalLM.from_pretrained("gpt2") + optimizer = torch.optim.Adam(model.parameters()) + wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", 
split="train") + train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + + loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) + model, optimizer, loader = accelerator.prepare(model, optimizer, loader) + + model.train() + for batch_idx, batch in enumerate(loader): + if batch_idx == 10: + break + print(f"Step {batch_idx}") + device_batch = {k: v.to(accelerator.device) for k, v in batch.items()} + optimizer.zero_grad() + + loss = model(**device_batch).loss + accelerator.backward(loss) + + optimizer.step() + + return model + + +if __name__ == "__main__": + Path("output").mkdir(exist_ok=True) + results = torchrunx.launch( + func=train, + hostnames=["localhost"], + workers_per_host=2, + ) + + trained_model: nn.Module = results.rank(0) + torch.save(trained_model.state_dict(), "output/model.pth") diff --git a/docs/source/examples.md b/docs/source/examples.md index 4d2338f6..bead0122 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -6,6 +6,89 @@ We will show examples of how to use `torchrunx` alongside several deep learning ### Accelerate +```python +from pathlib import Path + +import torch +from accelerate import Accelerator +from datasets import load_dataset +from torch import nn +from torch.utils.data import Dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + + +class GPT2CausalLMDataset(Dataset): + def __init__(self, text_dataset): + self.dataset = text_dataset + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + self.tokenizer.pad_token = self.tokenizer.eos_token + self.max_length = 1024 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + encoded = self.tokenizer( + self.dataset[idx]["text"], + max_length=self.max_length, + truncation=True, + padding="max_length", + return_tensors="pt", + ) + + input_ids = encoded.input_ids.squeeze() + attention_mask = encoded.attention_mask.squeeze() + labels = input_ids.clone() + + return { + "input_ids": input_ids, + 
"attention_mask": attention_mask, + "labels": labels, + } + + +def train(): + accelerator = Accelerator() + + model = AutoModelForCausalLM.from_pretrained("gpt2") + optimizer = torch.optim.Adam(model.parameters()) + wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") + train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + + loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) + model, optimizer, loader = accelerator.prepare(model, optimizer, loader) + + model.train() + for batch_idx, batch in enumerate(loader): + if batch_idx == 10: + break + print(f"Step {batch_idx}") + device_batch = {k: v.to(accelerator.device) for k, v in batch.items()} + optimizer.zero_grad() + + loss = model(**device_batch).loss + accelerator.backward(loss) + + optimizer.step() + + return model +``` + +```python +import torchrunx + +if __name__ == "__main__": + Path("output").mkdir(exist_ok=True) + results = torchrunx.launch( + func=train, + hostnames=["localhost"], + workers_per_host=1, + ) + + trained_model: nn.Module = results.rank(0) + torch.save(trained_model.state_dict(), "output/model.pth") +``` + ### HF Trainer ```python From f72f6494bfbb07878a6f368b7af45eaf4b154c60 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 31 Jan 2025 16:31:22 -0500 Subject: [PATCH 044/141] added tensorboard export for accelerate example (in file) --- acc.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/acc.py b/acc.py index 3e3f312f..af304c6f 100644 --- a/acc.py +++ b/acc.py @@ -6,6 +6,7 @@ from torch import nn from torch.utils.data import Dataset from transformers import AutoModelForCausalLM, AutoTokenizer +from tqdm import tqdm import torchrunx @@ -41,7 +42,9 @@ def __getitem__(self, idx): def train(): - accelerator = Accelerator() + accelerator = Accelerator(log_with="tensorboard", project_dir="output_dir") + + accelerator.init_trackers("example_project") model = AutoModelForCausalLM.from_pretrained("gpt2") 
optimizer = torch.optim.Adam(model.parameters()) @@ -52,19 +55,21 @@ def train(): model, optimizer, loader = accelerator.prepare(model, optimizer, loader) model.train() - for batch_idx, batch in enumerate(loader): - if batch_idx == 10: - break - print(f"Step {batch_idx}") + for batch_idx, batch in tqdm(enumerate(loader)): device_batch = {k: v.to(accelerator.device) for k, v in batch.items()} optimizer.zero_grad() loss = model(**device_batch).loss + + accelerator.log({"train_loss": loss.item()}, step=batch_idx) + accelerator.backward(loss) optimizer.step() - return model + accelerator.end_training() + + return accelerator.unwrap_model(model) if __name__ == "__main__": From 3e1e8872e8fec72a82ee454e441e042a43e41712 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 1 Feb 2025 13:26:00 -0500 Subject: [PATCH 045/141] updated HF Trainer example --- docs/source/examples.md | 94 +++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 41 deletions(-) diff --git a/docs/source/examples.md b/docs/source/examples.md index bead0122..9294fdfc 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -92,72 +92,84 @@ if __name__ == "__main__": ### HF Trainer ```python -import torch -from datasets import load_dataset -from torch import nn -from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments +from __future__ import annotations +import os -class GPT2CausalLMDataset(Dataset): - def __init__(self, text_dataset): - self.dataset = text_dataset - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - self.tokenizer.pad_token = self.tokenizer.eos_token - self.max_length = 1024 - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - encoded = self.tokenizer( - self.dataset[idx]["text"], - max_length=self.max_length, - truncation=True, - padding="max_length", - return_tensors="pt", - ) +from datasets import Dataset, load_dataset +from transformers import ( 
+ AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + PreTrainedModel, + Trainer, + TrainingArguments, +) - input_ids = encoded.input_ids.squeeze() - labels = input_ids.clone() - return {"input_ids": input_ids, "labels": labels} +def build_model() -> PreTrainedModel: + config = AutoConfig.from_pretrained("gpt2") + model = AutoModelForCausalLM.from_config(config) + return model -def train(): - model = AutoModelForCausalLM.from_pretrained("gpt2") - wikitext_train = load_dataset( - "Salesforce/wikitext", name="wikitext-2-v1", split="train" +def load_training_data() -> Dataset: + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings + tokenizer = AutoTokenizer.from_pretrained("gpt2") + tokenizer.pad_token = tokenizer.eos_token + + return ( + load_dataset("Salesforce/wikitext", name="wikitext-2-v1", split="train") + .select(range(8)) + .map( + lambda x: tokenizer( + x["text"], + max_length=1024, + truncation=True, + padding="max_length", + ), + batched=True, + remove_columns=["text"], + ) + .map(lambda x: {"labels": x["input_ids"]}) ) - train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + +def train( + model: PreTrainedModel, training_args: TrainingArguments, train_dataset: Dataset +) -> PreTrainedModel | None: trainer = Trainer( model=model, - args=TrainingArguments( - output_dir="output", - per_device_train_batch_size=16, - max_steps=10, - ), + args=training_args, train_dataset=train_dataset, ) - trainer.train() - return model + if int(os.environ["RANK"]) == 0: + return model ``` ```python import torchrunx + if __name__ == "__main__": + model = build_model() + training_args = TrainingArguments( + output_dir="output", + per_device_train_batch_size=2, + report_to="tensorboard", + ) + train_dataset = load_training_data() + results = torchrunx.launch( func=train, + func_args=(model, training_args, train_dataset), hostnames=["localhost"], - workers_per_host=1, + workers_per_host=2, ) - trained_model: nn.Module = results.rank(0) - 
torch.save(trained_model.state_dict(), "output/model.pth") + model = results.rank(0) ``` ### DeepSpeed From 8afb01976475c665ac4153e17cc5962d42f58cec Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 1 Feb 2025 23:08:24 -0500 Subject: [PATCH 046/141] examples structure in docs --- docs/source/examples/accelerate.md | 6 + .../scripts/torchrunx_accelerate.py} | 116 +++--------------- .../scripts/torchrunx_transformers.py | 84 +++++++++++++ docs/source/examples/transformers.md | 6 + docs/source/index.md | 21 ---- docs/source/index.rst | 21 ++++ 6 files changed, 131 insertions(+), 123 deletions(-) create mode 100644 docs/source/examples/accelerate.md rename docs/source/{examples.md => examples/scripts/torchrunx_accelerate.py} (50%) create mode 100644 docs/source/examples/scripts/torchrunx_transformers.py create mode 100644 docs/source/examples/transformers.md delete mode 100644 docs/source/index.md create mode 100644 docs/source/index.rst diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md new file mode 100644 index 00000000..f7c4a22d --- /dev/null +++ b/docs/source/examples/accelerate.md @@ -0,0 +1,6 @@ +# Accelerate + +```{eval-rst} +.. literalinclude:: ./scripts/torchrunx_accelerate.py + :start-after: # [docs:include] +``` diff --git a/docs/source/examples.md b/docs/source/examples/scripts/torchrunx_accelerate.py similarity index 50% rename from docs/source/examples.md rename to docs/source/examples/scripts/torchrunx_accelerate.py index 9294fdfc..fb2228c0 100644 --- a/docs/source/examples.md +++ b/docs/source/examples/scripts/torchrunx_accelerate.py @@ -1,14 +1,19 @@ -# Examples - -## Training GPT-2 on WikiText - -We will show examples of how to use `torchrunx` alongside several deep learning libraries to train a GPT-2 (small) model with text data from WikiText. 
- -### Accelerate - -```python +# /// script +# requires-python = ">=3.12" +# dependencies = [ +# "accelerate", +# "datasets", +# "tensorboard", +# "torch", +# "torchrunx", +# "transformers", +# ] +# /// + +# [docs:include] from pathlib import Path +import torchrunx import torch from accelerate import Accelerator from datasets import load_dataset @@ -72,10 +77,7 @@ def train(): optimizer.step() return model -``` -```python -import torchrunx if __name__ == "__main__": Path("output").mkdir(exist_ok=True) @@ -87,93 +89,3 @@ def train(): trained_model: nn.Module = results.rank(0) torch.save(trained_model.state_dict(), "output/model.pth") -``` - -### HF Trainer - -```python -from __future__ import annotations - -import os - -from datasets import Dataset, load_dataset -from transformers import ( - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - PreTrainedModel, - Trainer, - TrainingArguments, -) - - -def build_model() -> PreTrainedModel: - config = AutoConfig.from_pretrained("gpt2") - model = AutoModelForCausalLM.from_config(config) - return model - - -def load_training_data() -> Dataset: - os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings - tokenizer = AutoTokenizer.from_pretrained("gpt2") - tokenizer.pad_token = tokenizer.eos_token - - return ( - load_dataset("Salesforce/wikitext", name="wikitext-2-v1", split="train") - .select(range(8)) - .map( - lambda x: tokenizer( - x["text"], - max_length=1024, - truncation=True, - padding="max_length", - ), - batched=True, - remove_columns=["text"], - ) - .map(lambda x: {"labels": x["input_ids"]}) - ) - - -def train( - model: PreTrainedModel, training_args: TrainingArguments, train_dataset: Dataset -) -> PreTrainedModel | None: - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset, - ) - trainer.train() - - if int(os.environ["RANK"]) == 0: - return model -``` - -```python -import torchrunx - - -if __name__ == "__main__": - model = build_model() - training_args = 
TrainingArguments( - output_dir="output", - per_device_train_batch_size=2, - report_to="tensorboard", - ) - train_dataset = load_training_data() - - results = torchrunx.launch( - func=train, - func_args=(model, training_args, train_dataset), - hostnames=["localhost"], - workers_per_host=2, - ) - - model = results.rank(0) -``` - -### DeepSpeed - -### PyTorch Lightning - -### MosaicML Composer diff --git a/docs/source/examples/scripts/torchrunx_transformers.py b/docs/source/examples/scripts/torchrunx_transformers.py new file mode 100644 index 00000000..0154e2e0 --- /dev/null +++ b/docs/source/examples/scripts/torchrunx_transformers.py @@ -0,0 +1,84 @@ +# /// script +# requires-python = ">=3.12" +# dependencies = [ +# "datasets", +# "tensorboard", +# "torchrunx", +# "transformers[torch]", +# ] +# /// + +# [docs:include] +import os +import torchrunx + +from datasets import Dataset, load_dataset +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + PreTrainedModel, + Trainer, + TrainingArguments, +) + + +def build_model() -> PreTrainedModel: + config = AutoConfig.from_pretrained("gpt2") + model = AutoModelForCausalLM.from_config(config) + return model + + +def load_training_data() -> Dataset: + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings + tokenizer = AutoTokenizer.from_pretrained("gpt2") + tokenizer.pad_token = tokenizer.eos_token + + return ( + load_dataset("Salesforce/wikitext", name="wikitext-2-v1", split="train") + .select(range(8)) + .map( + lambda x: tokenizer( + x["text"], + max_length=1024, + truncation=True, + padding="max_length", + ), + batched=True, + remove_columns=["text"], + ) + .map(lambda x: {"labels": x["input_ids"]}) + ) + + +def train( + model: PreTrainedModel, training_args: TrainingArguments, train_dataset: Dataset +) -> PreTrainedModel | None: + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset, + ) + trainer.train() + + if int(os.environ["RANK"]) == 0: + 
return model + + +if __name__ == "__main__": + model = build_model() + training_args = TrainingArguments( + output_dir="output", + per_device_train_batch_size=2, + report_to="tensorboard", + ) + train_dataset = load_training_data() + + results = torchrunx.launch( + func=train, + func_args=(model, training_args, train_dataset), + hostnames=["localhost"], + workers_per_host=2, + ) + + model = results.rank(0) diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md new file mode 100644 index 00000000..68d0c26e --- /dev/null +++ b/docs/source/examples/transformers.md @@ -0,0 +1,6 @@ +# Transformers + +```{eval-rst} +.. literalinclude:: ./scripts/torchrunx_transformers.py + :start-after: # [docs:include] +``` diff --git a/docs/source/index.md b/docs/source/index.md deleted file mode 100644 index 4cd7edf8..00000000 --- a/docs/source/index.md +++ /dev/null @@ -1,21 +0,0 @@ -```{eval-rst} -.. include:: ./README.md - :parser: myst_parser.sphinx_ -``` - -```{toctree} -:hidden: true -:maxdepth: 1 - -api -advanced -examples -how_it_works -contributing -``` - -```{eval-rst} -.. sidebar-links:: - :github: - :pypi: torchrunx -``` diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..48becef9 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,21 @@ +.. include:: ./README.md + :parser: myst_parser.sphinx_ + +.. toctree:: + :hidden: + + api + advanced + how_it_works + contributing + +.. toctree:: + :caption: Examples + :hidden: + :glob: + + ./examples/* + +.. 
sidebar-links:: + :github: + :pypi: torchrunx From 504afd8087c0668fb1f3d0a66269b780a8f2fa50 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 1 Feb 2025 23:08:42 -0500 Subject: [PATCH 047/141] remove ./acc.py --- acc.py | 84 ---------------------------------------------------------- 1 file changed, 84 deletions(-) delete mode 100644 acc.py diff --git a/acc.py b/acc.py deleted file mode 100644 index af304c6f..00000000 --- a/acc.py +++ /dev/null @@ -1,84 +0,0 @@ -from pathlib import Path - -import torch -from accelerate import Accelerator -from datasets import load_dataset -from torch import nn -from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, AutoTokenizer -from tqdm import tqdm - -import torchrunx - - -class GPT2CausalLMDataset(Dataset): - def __init__(self, text_dataset): - self.dataset = text_dataset - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - self.tokenizer.pad_token = self.tokenizer.eos_token - self.max_length = 1024 - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - encoded = self.tokenizer( - self.dataset[idx]["text"], - max_length=self.max_length, - truncation=True, - padding="max_length", - return_tensors="pt", - ) - - input_ids = encoded.input_ids.squeeze() - attention_mask = encoded.attention_mask.squeeze() - labels = input_ids.clone() - - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "labels": labels, - } - - -def train(): - accelerator = Accelerator(log_with="tensorboard", project_dir="output_dir") - - accelerator.init_trackers("example_project") - - model = AutoModelForCausalLM.from_pretrained("gpt2") - optimizer = torch.optim.Adam(model.parameters()) - wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") - train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) - - loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) - model, optimizer, loader = accelerator.prepare(model, optimizer, 
loader) - - model.train() - for batch_idx, batch in tqdm(enumerate(loader)): - device_batch = {k: v.to(accelerator.device) for k, v in batch.items()} - optimizer.zero_grad() - - loss = model(**device_batch).loss - - accelerator.log({"train_loss": loss.item()}, step=batch_idx) - - accelerator.backward(loss) - - optimizer.step() - - accelerator.end_training() - - return accelerator.unwrap_model(model) - - -if __name__ == "__main__": - Path("output").mkdir(exist_ok=True) - results = torchrunx.launch( - func=train, - hostnames=["localhost"], - workers_per_host=2, - ) - - trained_model: nn.Module = results.rank(0) - torch.save(trained_model.state_dict(), "output/model.pth") From 592998a412800c1de2f45cab8b97e0cf8cdb9ed4 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 1 Feb 2025 23:23:00 -0500 Subject: [PATCH 048/141] add build-and-publish docs workflow --- .github/dependabot.yml | 3 --- .github/workflows/docs.yml | 30 ++++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/docs.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 67b710d9..0e1c39b9 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,6 +6,3 @@ updates: directory: "/" schedule: interval: "daily" - ignore: - - dependency-name: "sphinx" - - dependency-name: "furo" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..4269254a --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,30 @@ +name: Build and publish docs + +on: + push: + branches: [main] + +jobs: + + publish-docs: + runs-on: ubuntu-latest + permissions: + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v3.2.2 + with: + version: "0.5.0" + python-version-file: ".python-version" + enable-cache: true + - run: uv run --group docs python -m sphinx --builder html 
--doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - uses: actions/configure-pages@v5 + - uses: actions/upload-pages-artifact@v2 + with: + path: docs/_build/html + - id: deployment + uses: actions/deploy-pages@v3 From 1b998909f1d3b406ca083d53e983310b9be6aebb Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 2 Feb 2025 00:16:09 -0500 Subject: [PATCH 049/141] added CLI arguments --- .../scripts/torchrunx_transformers.py | 19 +++++++++---------- docs/source/examples/transformers.md | 4 ++++ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/docs/source/examples/scripts/torchrunx_transformers.py b/docs/source/examples/scripts/torchrunx_transformers.py index 0154e2e0..5cc56899 100644 --- a/docs/source/examples/scripts/torchrunx_transformers.py +++ b/docs/source/examples/scripts/torchrunx_transformers.py @@ -5,6 +5,7 @@ # "tensorboard", # "torchrunx", # "transformers[torch]", +# "tyro", # ] # /// @@ -21,6 +22,7 @@ Trainer, TrainingArguments, ) +import tyro def build_model() -> PreTrainedModel: @@ -65,20 +67,17 @@ def train( return model -if __name__ == "__main__": +def main(launcher: torchrunx.Launcher, args: TrainingArguments): model = build_model() - training_args = TrainingArguments( - output_dir="output", - per_device_train_batch_size=2, - report_to="tensorboard", - ) train_dataset = load_training_data() - results = torchrunx.launch( + results = launcher.run( func=train, - func_args=(model, training_args, train_dataset), - hostnames=["localhost"], - workers_per_host=2, + func_args=(model, args, train_dataset), ) model = results.rank(0) + + +if __name__ == "__main__": + tyro.cli(main) diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 68d0c26e..432312b6 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -1,5 +1,9 @@ # Transformers +```bash +uv run torchrunx_transformers.py --launcher.hostnames localhost 
--launcher.workers-per-host 2 --args.output_dir output --args.per-device-train-batch-size 4 --args.report-to tensorboard +``` + ```{eval-rst} .. literalinclude:: ./scripts/torchrunx_transformers.py :start-after: # [docs:include] From 23327b3aafbce7b6e0415f95a55e59a8b9be2875 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Wed, 5 Feb 2025 14:17:04 -0500 Subject: [PATCH 050/141] updated transformers training script --- docs/source/examples/accelerate.md | 3 +- ...nx_accelerate.py => accelerate_example.py} | 4 +- .../scripts/torchrunx_transformers.py | 83 ----------------- docs/source/examples/transformers.md | 7 +- docs/source/examples/transformers_example.py | 92 +++++++++++++++++++ 5 files changed, 99 insertions(+), 90 deletions(-) rename docs/source/examples/{scripts/torchrunx_accelerate.py => accelerate_example.py} (99%) delete mode 100644 docs/source/examples/scripts/torchrunx_transformers.py create mode 100644 docs/source/examples/transformers_example.py diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index f7c4a22d..b8d75951 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -1,6 +1,5 @@ # Accelerate ```{eval-rst} -.. literalinclude:: ./scripts/torchrunx_accelerate.py - :start-after: # [docs:include] +.. 
literalinclude:: ./accelerate_example.py ``` diff --git a/docs/source/examples/scripts/torchrunx_accelerate.py b/docs/source/examples/accelerate_example.py similarity index 99% rename from docs/source/examples/scripts/torchrunx_accelerate.py rename to docs/source/examples/accelerate_example.py index fb2228c0..6cb08937 100644 --- a/docs/source/examples/scripts/torchrunx_accelerate.py +++ b/docs/source/examples/accelerate_example.py @@ -10,10 +10,8 @@ # ] # /// -# [docs:include] from pathlib import Path -import torchrunx import torch from accelerate import Accelerator from datasets import load_dataset @@ -21,6 +19,8 @@ from torch.utils.data import Dataset from transformers import AutoModelForCausalLM, AutoTokenizer +import torchrunx + class GPT2CausalLMDataset(Dataset): def __init__(self, text_dataset): diff --git a/docs/source/examples/scripts/torchrunx_transformers.py b/docs/source/examples/scripts/torchrunx_transformers.py deleted file mode 100644 index 5cc56899..00000000 --- a/docs/source/examples/scripts/torchrunx_transformers.py +++ /dev/null @@ -1,83 +0,0 @@ -# /// script -# requires-python = ">=3.12" -# dependencies = [ -# "datasets", -# "tensorboard", -# "torchrunx", -# "transformers[torch]", -# "tyro", -# ] -# /// - -# [docs:include] -import os -import torchrunx - -from datasets import Dataset, load_dataset -from transformers import ( - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - PreTrainedModel, - Trainer, - TrainingArguments, -) -import tyro - - -def build_model() -> PreTrainedModel: - config = AutoConfig.from_pretrained("gpt2") - model = AutoModelForCausalLM.from_config(config) - return model - - -def load_training_data() -> Dataset: - os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings - tokenizer = AutoTokenizer.from_pretrained("gpt2") - tokenizer.pad_token = tokenizer.eos_token - - return ( - load_dataset("Salesforce/wikitext", name="wikitext-2-v1", split="train") - .select(range(8)) - .map( - lambda x: tokenizer( - 
x["text"], - max_length=1024, - truncation=True, - padding="max_length", - ), - batched=True, - remove_columns=["text"], - ) - .map(lambda x: {"labels": x["input_ids"]}) - ) - - -def train( - model: PreTrainedModel, training_args: TrainingArguments, train_dataset: Dataset -) -> PreTrainedModel | None: - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset, - ) - trainer.train() - - if int(os.environ["RANK"]) == 0: - return model - - -def main(launcher: torchrunx.Launcher, args: TrainingArguments): - model = build_model() - train_dataset = load_training_data() - - results = launcher.run( - func=train, - func_args=(model, args, train_dataset), - ) - - model = results.rank(0) - - -if __name__ == "__main__": - tyro.cli(main) diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 432312b6..e6c54afb 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -1,10 +1,11 @@ # Transformers ```bash -uv run torchrunx_transformers.py --launcher.hostnames localhost --launcher.workers-per-host 2 --args.output_dir output --args.per-device-train-batch-size 4 --args.report-to tensorboard +uv run torchrun.xyz/torchrunx_transformers.py \ + --launcher.hostnames localhost --launcher.workers-per-host 2 \ + --args.output_dir output --args.per-device-train-batch-size 4 --args.report-to tensorboard ``` ```{eval-rst} -.. literalinclude:: ./scripts/torchrunx_transformers.py - :start-after: # [docs:include] +.. 
literalinclude:: ./transformers_example.py ``` diff --git a/docs/source/examples/transformers_example.py b/docs/source/examples/transformers_example.py new file mode 100644 index 00000000..47e4442e --- /dev/null +++ b/docs/source/examples/transformers_example.py @@ -0,0 +1,92 @@ +# /// script +# requires-python = ">=3.12" +# dependencies = [ +# "datasets", +# "tensorboard", +# "torchrunx", +# "transformers[torch]", +# "tyro", +# ] +# /// + +import os +from typing import Annotated + +import tyro +from datasets import Dataset, load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + PreTrainedModel, + Trainer, + TrainingArguments, +) + +import torchrunx + + +def build_model(name: str = "gpt2") -> PreTrainedModel: + return AutoModelForCausalLM.from_pretrained(name) + + +def load_training_data( + tokenizer_name: str, + path: str, + name: str | None = None, + split: str | None = None, + text_column_name: str = "text", + num_samples: int | None = None, +) -> Dataset: + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + + dataset = load_dataset(path, name=name, split=split) + + if num_samples is None: + num_samples = len(dataset) + + return ( + dataset.select(range(num_samples)) + .map( + lambda x: tokenizer( + x[text_column_name], + max_length=tokenizer.model_max_length, + truncation=True, + padding="max_length", + ), + batched=True, + input_columns=[text_column_name], + remove_columns=[text_column_name], + ) + .map(lambda x: {"labels": x["input_ids"]}) + ) + + +def train( + model: PreTrainedModel, training_args: TrainingArguments, train_dataset: Dataset +) -> PreTrainedModel | None: + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset, + ) + trainer.train() + + if int(os.environ["RANK"]) == 0: + return model + + +def main( + launcher: 
torchrunx.Launcher, + model: Annotated[PreTrainedModel, tyro.conf.arg(prefix_name=False, constructor=build_model)], + train_dataset: Annotated[Dataset, tyro.conf.arg(name="dataset", constructor=load_training_data)], + training_args: Annotated[TrainingArguments, tyro.conf.arg(name="trainer", help="")], +): + results = launcher.run(train, (model, training_args, train_dataset)) + model = results.rank(0) + + +if __name__ == "__main__": + tyro.cli(main) From 5d7d6fef97ccb1c36254033a72dfb8fe62334bea Mon Sep 17 00:00:00 2001 From: "peter_curtin@brown.edu" Date: Wed, 5 Feb 2025 15:27:43 -0500 Subject: [PATCH 051/141] ignore these files --- dsp.py | 88 +++++++++++++++++++++++++++++++++++++++++++++++++ dsp_config.json | 20 +++++++++++ 2 files changed, 108 insertions(+) create mode 100644 dsp.py create mode 100644 dsp_config.json diff --git a/dsp.py b/dsp.py new file mode 100644 index 00000000..0d3ccf2a --- /dev/null +++ b/dsp.py @@ -0,0 +1,88 @@ +from dataclasses import dataclass +from pathlib import Path + +import deepspeed +import torch + +from datasets import load_dataset +from torch import nn +from torch.utils.data import Dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + +import torchrunx + + +class GPT2CausalLMDataset(Dataset): + def __init__(self, text_dataset): + self.dataset = text_dataset + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + self.tokenizer.pad_token = self.tokenizer.eos_token + self.max_length = 1024 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + encoded = self.tokenizer( + self.dataset[idx]["text"], + max_length=self.max_length, + truncation=True, + padding="max_length", + return_tensors="pt", + ) + + input_ids = encoded.input_ids.squeeze() + attention_mask = encoded.attention_mask.squeeze() + labels = input_ids.clone() + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "labels": labels, + } + + +@dataclass +class DSPArgs: + deepspeed_config: str + # 
train_batch_size: int + # batch_size: int + + +def train(): + model = AutoModelForCausalLM.from_pretrained("gpt2") + # optimizer = torch.optim.Adam(model.parameters()) + wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") + train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + + loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) + + model_engine, optimizer, _, _ = deepspeed.initialize( + args=DSPArgs(deepspeed_config="dsp_config.json"), + model=model, + model_parameters=model.parameters(), + ) + + model.train() + for batch_idx, batch in enumerate(loader): + if batch_idx == 10: + break + print(f"Step {batch_idx}") + + device_batch = {k: v.to(model.device) for k, v in batch.items()} + + model.zero_grad() + + loss = model_engine(**device_batch).loss + model_engine.backward(loss) + + model_engine.step() + + +if __name__ == "__main__": + Path("output").mkdir(exist_ok=True) + results = torchrunx.launch( + func=train, + hostnames=["localhost"], + workers_per_host=1, + ) diff --git a/dsp_config.json b/dsp_config.json new file mode 100644 index 00000000..c5bca23c --- /dev/null +++ b/dsp_config.json @@ -0,0 +1,20 @@ +{ + "zero_optimization": { + "stage": 1, + "reduce_bucket_size": 5e8 + }, + "optimizer": { + "type": "AdamW", + "params": { + //"lr": "auto", + "betas": [0.9, 0.999], + "eps": 1e-8, + //"weight_decay": "auto" + } + } + //"gradient_accumulation_steps": "auto", + //"gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": 8, + //"train_micro_batch_size_per_gpu": "auto", +} \ No newline at end of file From 9e897a5fdd09d5e8f01ffb1c8c88f44067a4fbf3 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Wed, 5 Feb 2025 15:31:10 -0500 Subject: [PATCH 052/141] add rough deepspeed example --- docs/source/examples/deepspeed.md | 6 ++ docs/source/examples/deepspeed_config.json | 15 ++++ docs/source/examples/deepspeed_example.py | 88 ++++++++++++++++++++++ dsp_config.json | 20 ----- 4 files changed, 
109 insertions(+), 20 deletions(-) create mode 100644 docs/source/examples/deepspeed.md create mode 100644 docs/source/examples/deepspeed_config.json create mode 100644 docs/source/examples/deepspeed_example.py delete mode 100644 dsp_config.json diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md new file mode 100644 index 00000000..71c568b5 --- /dev/null +++ b/docs/source/examples/deepspeed.md @@ -0,0 +1,6 @@ +# DeepSpeed + +```{eval-rst} +.. literalinclude:: ./deepspeed_example.py +.. literalinclude:: ./deepspeed_config.py +``` diff --git a/docs/source/examples/deepspeed_config.json b/docs/source/examples/deepspeed_config.json new file mode 100644 index 00000000..042f8716 --- /dev/null +++ b/docs/source/examples/deepspeed_config.json @@ -0,0 +1,15 @@ +{ + "zero_optimization": { + "stage": 1, + "reduce_bucket_size": 5e8 + }, + "optimizer": { + "type": "AdamW", + "params": { + "betas": [0.9, 0.999], + "eps": 1e-8 + } + }, + "steps_per_print": 2000, + "train_batch_size": 8 +} \ No newline at end of file diff --git a/docs/source/examples/deepspeed_example.py b/docs/source/examples/deepspeed_example.py new file mode 100644 index 00000000..0d3ccf2a --- /dev/null +++ b/docs/source/examples/deepspeed_example.py @@ -0,0 +1,88 @@ +from dataclasses import dataclass +from pathlib import Path + +import deepspeed +import torch + +from datasets import load_dataset +from torch import nn +from torch.utils.data import Dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + +import torchrunx + + +class GPT2CausalLMDataset(Dataset): + def __init__(self, text_dataset): + self.dataset = text_dataset + self.tokenizer = AutoTokenizer.from_pretrained("gpt2") + self.tokenizer.pad_token = self.tokenizer.eos_token + self.max_length = 1024 + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + encoded = self.tokenizer( + self.dataset[idx]["text"], + max_length=self.max_length, + truncation=True, + 
padding="max_length", + return_tensors="pt", + ) + + input_ids = encoded.input_ids.squeeze() + attention_mask = encoded.attention_mask.squeeze() + labels = input_ids.clone() + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "labels": labels, + } + + +@dataclass +class DSPArgs: + deepspeed_config: str + # train_batch_size: int + # batch_size: int + + +def train(): + model = AutoModelForCausalLM.from_pretrained("gpt2") + # optimizer = torch.optim.Adam(model.parameters()) + wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") + train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + + loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) + + model_engine, optimizer, _, _ = deepspeed.initialize( + args=DSPArgs(deepspeed_config="dsp_config.json"), + model=model, + model_parameters=model.parameters(), + ) + + model.train() + for batch_idx, batch in enumerate(loader): + if batch_idx == 10: + break + print(f"Step {batch_idx}") + + device_batch = {k: v.to(model.device) for k, v in batch.items()} + + model.zero_grad() + + loss = model_engine(**device_batch).loss + model_engine.backward(loss) + + model_engine.step() + + +if __name__ == "__main__": + Path("output").mkdir(exist_ok=True) + results = torchrunx.launch( + func=train, + hostnames=["localhost"], + workers_per_host=1, + ) diff --git a/dsp_config.json b/dsp_config.json deleted file mode 100644 index c5bca23c..00000000 --- a/dsp_config.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "zero_optimization": { - "stage": 1, - "reduce_bucket_size": 5e8 - }, - "optimizer": { - "type": "AdamW", - "params": { - //"lr": "auto", - "betas": [0.9, 0.999], - "eps": 1e-8, - //"weight_decay": "auto" - } - } - //"gradient_accumulation_steps": "auto", - //"gradient_clipping": "auto", - "steps_per_print": 2000, - "train_batch_size": 8, - //"train_micro_batch_size_per_gpu": "auto", -} \ No newline at end of file From 857a9b0d3f08b3d26f96c5ea584a65ad92a5cdd5 Mon Sep 
17 00:00:00 2001 From: Peter Curtin Date: Wed, 5 Feb 2025 15:35:37 -0500 Subject: [PATCH 053/141] typo --- docs/source/examples/deepspeed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index 71c568b5..cfa8e657 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -2,5 +2,5 @@ ```{eval-rst} .. literalinclude:: ./deepspeed_example.py -.. literalinclude:: ./deepspeed_config.py +.. literalinclude:: ./deepspeed_config.json ``` From 25a9ca66b82289a7130069699a4fdd1a7db5af39 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Thu, 6 Feb 2025 18:11:18 -0500 Subject: [PATCH 054/141] lightning example --- docs/source/examples/lightning.md | 5 ++ .../source/examples/lightning_example.py | 69 ++++++++++--------- 2 files changed, 43 insertions(+), 31 deletions(-) create mode 100644 docs/source/examples/lightning.md rename dsp.py => docs/source/examples/lightning_example.py (54%) diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md new file mode 100644 index 00000000..164ace4a --- /dev/null +++ b/docs/source/examples/lightning.md @@ -0,0 +1,5 @@ +# Pytorch Lightning + +```{eval-rst} +.. 
literalinclude:: ./lightning.py +``` diff --git a/dsp.py b/docs/source/examples/lightning_example.py similarity index 54% rename from dsp.py rename to docs/source/examples/lightning_example.py index 0d3ccf2a..4e022ab8 100644 --- a/dsp.py +++ b/docs/source/examples/lightning_example.py @@ -1,10 +1,10 @@ -from dataclasses import dataclass +import os from pathlib import Path -import deepspeed +import lightning as L import torch - from datasets import load_dataset + from torch import nn from torch.utils.data import Dataset from transformers import AutoModelForCausalLM, AutoTokenizer @@ -42,47 +42,54 @@ def __getitem__(self, idx): } -@dataclass -class DSPArgs: - deepspeed_config: str - # train_batch_size: int - # batch_size: int +class GPT2LightningWrapper(L.LightningModule): + def __init__(self): + super().__init__() + self.model = AutoModelForCausalLM.from_pretrained("gpt2") + + def training_step(self, batch, batch_idx): + device_batch = {k: v.to(self.model.device) for k, v in batch.items()} + loss = self.model(**device_batch).loss + self.log("train_loss", loss) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=1e-5) + return optimizer def train(): - model = AutoModelForCausalLM.from_pretrained("gpt2") - # optimizer = torch.optim.Adam(model.parameters()) + lightning_model = GPT2LightningWrapper() + wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) - - loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) - - model_engine, optimizer, _, _ = deepspeed.initialize( - args=DSPArgs(deepspeed_config="dsp_config.json"), - model=model, - model_parameters=model.parameters(), + train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) + + trainer = L.Trainer( + accelerator="gpu", + limit_train_batches=10, + max_epochs=1, + devices=2, + num_nodes=1, + strategy="ddp", ) - model.train() - for batch_idx, 
batch in enumerate(loader): - if batch_idx == 10: - break - print(f"Step {batch_idx}") - - device_batch = {k: v.to(model.device) for k, v in batch.items()} - - model.zero_grad() + trainer.fit(model=lightning_model, train_dataloaders=train_loader) - loss = model_engine(**device_batch).loss - model_engine.backward(loss) - - model_engine.step() + if int(os.environ["RANK"]) == 0: + return trainer.model.model + return None if __name__ == "__main__": + # hack to prevent lightning from recognizing SLURM environment... + os.environ["SLURM_JOB_NAME"] = "bash" Path("output").mkdir(exist_ok=True) results = torchrunx.launch( func=train, hostnames=["localhost"], - workers_per_host=1, + workers_per_host=2, ) + + trained_model: nn.Module = results.rank(0) + torch.save(trained_model.state_dict(), "output/model.pth") From 5016c78d95212aa8abd016edec5654ff511bfe16 Mon Sep 17 00:00:00 2001 From: Peter Curtin <98424367+pmcurtin@users.noreply.github.com> Date: Thu, 6 Feb 2025 18:18:36 -0500 Subject: [PATCH 055/141] Update lightning.md type --- docs/source/examples/lightning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index 164ace4a..aac092a1 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -1,5 +1,5 @@ # Pytorch Lightning ```{eval-rst} -.. literalinclude:: ./lightning.py +.. 
literalinclude:: ./lightning_example.py ``` From bba941b750db1658995a2e985038bcf63b9d6284 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 15:17:28 -0500 Subject: [PATCH 056/141] new ext module and fix for lightning example --- docs/source/examples/lightning_example.py | 5 ++--- src/torchrunx/ext/__init__.py | 1 + src/torchrunx/ext/lightning.py | 11 +++++++++++ 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 src/torchrunx/ext/__init__.py create mode 100644 src/torchrunx/ext/lightning.py diff --git a/docs/source/examples/lightning_example.py b/docs/source/examples/lightning_example.py index 4e022ab8..c8cf2421 100644 --- a/docs/source/examples/lightning_example.py +++ b/docs/source/examples/lightning_example.py @@ -10,7 +10,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer import torchrunx - +from torchrunx.ext.lightning import TorchrunxClusterEnvironment class GPT2CausalLMDataset(Dataset): def __init__(self, text_dataset): @@ -72,6 +72,7 @@ def train(): devices=2, num_nodes=1, strategy="ddp", + plugins=[TorchrunxClusterEnvironment()] ) trainer.fit(model=lightning_model, train_dataloaders=train_loader) @@ -82,8 +83,6 @@ def train(): if __name__ == "__main__": - # hack to prevent lightning from recognizing SLURM environment... - os.environ["SLURM_JOB_NAME"] = "bash" Path("output").mkdir(exist_ok=True) results = torchrunx.launch( func=train, diff --git a/src/torchrunx/ext/__init__.py b/src/torchrunx/ext/__init__.py new file mode 100644 index 00000000..fa285e30 --- /dev/null +++ b/src/torchrunx/ext/__init__.py @@ -0,0 +1 @@ +"Extensions classes and functions." 
diff --git a/src/torchrunx/ext/lightning.py b/src/torchrunx/ext/lightning.py new file mode 100644 index 00000000..8e0c8468 --- /dev/null +++ b/src/torchrunx/ext/lightning.py @@ -0,0 +1,11 @@ +import torch +from lightning.fabric.plugins.environments.torchelastic import TorchElasticEnvironment + + +class TorchrunxClusterEnvironment(TorchElasticEnvironment): + """PyTorch Lightning ClusterEnvironment compatible with torchrunx.""" + + @staticmethod + def detect() -> bool: + """Returns ``True`` if the current process was launched using torchrunx.""" + return torch.distributed.is_available() From 96f2de5e4f980b6a16a22ee47b061a24602676c1 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 15:20:40 -0500 Subject: [PATCH 057/141] typing --- docs/source/examples/lightning_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/examples/lightning_example.py b/docs/source/examples/lightning_example.py index c8cf2421..9778c3ea 100644 --- a/docs/source/examples/lightning_example.py +++ b/docs/source/examples/lightning_example.py @@ -47,7 +47,7 @@ def __init__(self): super().__init__() self.model = AutoModelForCausalLM.from_pretrained("gpt2") - def training_step(self, batch, batch_idx): + def training_step(self, batch, *args): # pyright: ignore device_batch = {k: v.to(self.model.device) for k, v in batch.items()} loss = self.model(**device_batch).loss self.log("train_loss", loss) From d130a8d851db8a671ae4388c504d8bd2d6259bb7 Mon Sep 17 00:00:00 2001 From: "peter_curtin@brown.edu" Date: Fri, 7 Feb 2025 15:45:49 -0500 Subject: [PATCH 058/141] checkpointing --- docs/source/examples/lightning_example.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/source/examples/lightning_example.py b/docs/source/examples/lightning_example.py index 9778c3ea..f977e516 100644 --- a/docs/source/examples/lightning_example.py +++ b/docs/source/examples/lightning_example.py @@ -72,14 +72,15 @@ def train(): devices=2, 
num_nodes=1, strategy="ddp", - plugins=[TorchrunxClusterEnvironment()] + plugins=[TorchrunxClusterEnvironment()], + enable_checkpointing=False ) trainer.fit(model=lightning_model, train_dataloaders=train_loader) + checkpoint = f"{trainer.log_dir}/final.ckpt" + trainer.save_checkpoint(checkpoint) - if int(os.environ["RANK"]) == 0: - return trainer.model.model - return None + return checkpoint if __name__ == "__main__": @@ -90,5 +91,5 @@ def train(): workers_per_host=2, ) - trained_model: nn.Module = results.rank(0) - torch.save(trained_model.state_dict(), "output/model.pth") + checkpoint_path = results.rank(0) + print(f"Checkpoint at: {checkpoint_path}") From d38f6d8337226a7a3bfab588c495f9447c5af092 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 15:48:34 -0500 Subject: [PATCH 059/141] fix ruff/type checks --- src/torchrunx/ext/__init__.py | 2 +- src/torchrunx/ext/lightning.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/torchrunx/ext/__init__.py b/src/torchrunx/ext/__init__.py index fa285e30..88896863 100644 --- a/src/torchrunx/ext/__init__.py +++ b/src/torchrunx/ext/__init__.py @@ -1 +1 @@ -"Extensions classes and functions." 
+"""Extensions classes and functions.""" diff --git a/src/torchrunx/ext/lightning.py b/src/torchrunx/ext/lightning.py index 8e0c8468..b3b6e6a9 100644 --- a/src/torchrunx/ext/lightning.py +++ b/src/torchrunx/ext/lightning.py @@ -1,3 +1,5 @@ +"""Pytorch Lightning extension utilities.""" + import torch from lightning.fabric.plugins.environments.torchelastic import TorchElasticEnvironment From cae668e02e26b9198bb48f824c989505c7102149 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 15:56:04 -0500 Subject: [PATCH 060/141] typing again --- src/torchrunx/ext/lightning.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/torchrunx/ext/lightning.py b/src/torchrunx/ext/lightning.py index b3b6e6a9..e467e01c 100644 --- a/src/torchrunx/ext/lightning.py +++ b/src/torchrunx/ext/lightning.py @@ -1,7 +1,9 @@ """Pytorch Lightning extension utilities.""" import torch -from lightning.fabric.plugins.environments.torchelastic import TorchElasticEnvironment +from lightning.fabric.plugins.environments.torchelastic import ( + TorchElasticEnvironment, # pyright: ignore [reportMissingImports] +) class TorchrunxClusterEnvironment(TorchElasticEnvironment): From 28ffc04a1555feaae5819994802f1affa82bd4bb Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 15:57:01 -0500 Subject: [PATCH 061/141] remove direcotry creation --- docs/source/examples/lightning_example.py | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/source/examples/lightning_example.py b/docs/source/examples/lightning_example.py index f977e516..4da51f2a 100644 --- a/docs/source/examples/lightning_example.py +++ b/docs/source/examples/lightning_example.py @@ -84,7 +84,6 @@ def train(): if __name__ == "__main__": - Path("output").mkdir(exist_ok=True) results = torchrunx.launch( func=train, hostnames=["localhost"], From 45fe57c38138e631759e4644608486c2164d4fb4 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 16:07:26 -0500 Subject: [PATCH 062/141] actually 
fixed --- src/torchrunx/ext/lightning.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/torchrunx/ext/lightning.py b/src/torchrunx/ext/lightning.py index e467e01c..9d008535 100644 --- a/src/torchrunx/ext/lightning.py +++ b/src/torchrunx/ext/lightning.py @@ -1,8 +1,8 @@ """Pytorch Lightning extension utilities.""" import torch -from lightning.fabric.plugins.environments.torchelastic import ( - TorchElasticEnvironment, # pyright: ignore [reportMissingImports] +from lightning.fabric.plugins.environments.torchelastic import ( # pyright: ignore [reportMissingImports] + TorchElasticEnvironment, ) From 8ad727d77728f1c71aad5d8232209ab2e8926fd2 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 7 Feb 2025 16:27:21 -0500 Subject: [PATCH 063/141] add GROUP_RANK as node rank --- src/torchrunx/agent.py | 1 + src/torchrunx/worker.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index 27dd8489..d21dbf3a 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -83,6 +83,7 @@ def main(launcher_agent_group: LauncherAgentGroup, logger_hostname: str, logger_ backend=launcher_payload.backend, rank=worker_global_ranks[i], local_rank=i, + node_rank=agent_rank, local_world_size=num_workers, world_size=worker_world_size, hostname=launcher_payload.hostnames[agent_rank], diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index 12caf827..e7307520 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -32,6 +32,7 @@ class WorkerArgs: backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None rank: int local_rank: int + node_rank: int local_world_size: int world_size: int hostname: str @@ -79,6 +80,7 @@ def worker_entrypoint(serialized_worker_args: SerializedWorkerArgs) -> Any | Exc os.environ["RANK"] = str(worker_args.rank) os.environ["LOCAL_RANK"] = str(worker_args.local_rank) + os.environ["GROUP_RANK"] = str(worker_args.node_rank) os.environ["LOCAL_WORLD_SIZE"] = 
str(worker_args.local_world_size) os.environ["WORLD_SIZE"] = str(worker_args.world_size) os.environ["MASTER_ADDR"] = worker_args.main_agent_hostname From c58cb11d05be2731d7a042f8e0fbe9ff622b7d1e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 16:37:59 -0500 Subject: [PATCH 064/141] updated examples --- README.md | 16 +- docs/conf.py | 2 + docs/source/api.md | 4 +- docs/source/examples/accelerate.md | 2 +- docs/source/examples/deepspeed.md | 4 +- docs/source/examples/lightning.md | 2 +- .../{ => scripts}/accelerate_example.py | 0 .../{ => scripts}/deepspeed_config.json | 0 .../{ => scripts}/deepspeed_example.py | 0 .../transformers_train.py} | 18 +- docs/source/examples/transformers.md | 42 +- docs/source/examples/transformers_help.txt | 478 ++++++++++++++++++ docs/source/how_it_works.md | 2 +- 13 files changed, 543 insertions(+), 27 deletions(-) rename docs/source/examples/{ => scripts}/accelerate_example.py (100%) rename docs/source/examples/{ => scripts}/deepspeed_config.json (100%) rename docs/source/examples/{ => scripts}/deepspeed_example.py (100%) rename docs/source/examples/{transformers_example.py => scripts/transformers_train.py} (87%) create mode 100644 docs/source/examples/transformers_help.txt diff --git a/README.md b/README.md index 0a956e09..4a7addac 100644 --- a/README.md +++ b/README.md @@ -73,14 +73,14 @@ trained_model: nn.Module = results.rank(0) torch.save(trained_model.state_dict(), "output/model.pth") ``` -**See [training GPT-2 on WikiText](https://torchrunx.readthedocs.io/stable/examples.html#training-gpt-2-on-wikitext) for more examples using the following deep learning libraries:** - - Accelerate - - HF Transformers - - DeepSpeed - - PyTorch Lightning - - MosaicML Composer - -**Refer to our [API](https://torchrunx.readthedocs.io/stable/api.html) and [Advanced Usage Guide](https://torchrunx.readthedocs.io/stable/advanced.html) for many more capabilities!** +**See examples where we fine-tune LLMs (e.g. 
GPT-2 on WikiText) using:** + - [Accelerate](https://torchrun.xyz/examples/accelerate.html) + - [HF Transformers](https://torchrun.xyz/examples/transformers.html) + - [DeepSpeed](https://torchrun.xyz/examples/deepspeed.html) + - [PyTorch Lightning](https://torchrun.xyz/examples/lightning.html) + - [MosaicML Composer](https://torchrun.xyz/examples/composer.html) + +**Refer to our [API](https://torchrun.xyz/api.html) and [Advanced Usage Guide](https://torchrun.xyz/advanced.html) for many more capabilities!** --- diff --git a/docs/conf.py b/docs/conf.py index 06e89f6c..eace1695 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,6 +6,8 @@ html_theme = "furo" language = "en" +html_extra_path = ["source/examples/scripts"] + extensions = [ "autodoc2", "myst_parser", # support markdown diff --git a/docs/source/api.md b/docs/source/api.md index 169602d3..d4774af0 100644 --- a/docs/source/api.md +++ b/docs/source/api.md @@ -1,9 +1,7 @@ # API -## Launching functions - ```{eval-rst} -.. autofunction:: torchrunx.launch(func: Callable, ...) +.. autofunction:: torchrunx.launch ``` We provide the {mod}`torchrunx.Launcher` class as an alias to {mod}`torchrunx.launch`. diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index b8d75951..33ec5ba5 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -1,5 +1,5 @@ # Accelerate ```{eval-rst} -.. literalinclude:: ./accelerate_example.py +.. literalinclude:: ./scripts/accelerate_example.py ``` diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index cfa8e657..f3b21a3f 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -1,6 +1,6 @@ # DeepSpeed ```{eval-rst} -.. literalinclude:: ./deepspeed_example.py -.. literalinclude:: ./deepspeed_config.json +.. literalinclude:: ./scripts/deepspeed_example.py +.. 
literalinclude:: ./scripts/deepspeed_config.json ``` diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index aac092a1..95786b49 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -1,5 +1,5 @@ # Pytorch Lightning ```{eval-rst} -.. literalinclude:: ./lightning_example.py +.. literalinclude:: ./scripts/lightning_example.py ``` diff --git a/docs/source/examples/accelerate_example.py b/docs/source/examples/scripts/accelerate_example.py similarity index 100% rename from docs/source/examples/accelerate_example.py rename to docs/source/examples/scripts/accelerate_example.py diff --git a/docs/source/examples/deepspeed_config.json b/docs/source/examples/scripts/deepspeed_config.json similarity index 100% rename from docs/source/examples/deepspeed_config.json rename to docs/source/examples/scripts/deepspeed_config.json diff --git a/docs/source/examples/deepspeed_example.py b/docs/source/examples/scripts/deepspeed_example.py similarity index 100% rename from docs/source/examples/deepspeed_example.py rename to docs/source/examples/scripts/deepspeed_example.py diff --git a/docs/source/examples/transformers_example.py b/docs/source/examples/scripts/transformers_train.py similarity index 87% rename from docs/source/examples/transformers_example.py rename to docs/source/examples/scripts/transformers_train.py index 47e4442e..387549c6 100644 --- a/docs/source/examples/transformers_example.py +++ b/docs/source/examples/scripts/transformers_train.py @@ -9,6 +9,8 @@ # ] # /// +# [docs:start-after] +import functools import os from typing import Annotated @@ -25,7 +27,7 @@ import torchrunx -def build_model(name: str = "gpt2") -> PreTrainedModel: +def build_model(name: str) -> PreTrainedModel: return AutoModelForCausalLM.from_pretrained(name) @@ -41,6 +43,12 @@ def load_training_data( tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token + 
tokenize_fn = functools.partial( + tokenizer, + max_length=tokenizer.model_max_length, + truncation=True, + padding="max_length", + ) dataset = load_dataset(path, name=name, split=split) @@ -50,12 +58,7 @@ def load_training_data( return ( dataset.select(range(num_samples)) .map( - lambda x: tokenizer( - x[text_column_name], - max_length=tokenizer.model_max_length, - truncation=True, - padding="max_length", - ), + tokenize_fn, batched=True, input_columns=[text_column_name], remove_columns=[text_column_name], @@ -74,6 +77,7 @@ def train( ) trainer.train() + # TODO: return checkpoint path if int(os.environ["RANK"]) == 0: return model diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index e6c54afb..8196b4d6 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -1,11 +1,45 @@ # Transformers +Here's an example script that uses `torchrunx` with [`transformers.Trainer`](https://huggingface.co/docs/transformers/en/main_classes/trainer) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes: [https://torchrun.xyz/transformers_train.py](https://torchrun.xyz/transformers_train.py). 
+ +You can pass command-line arguments to customize: + - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) + - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) + - `--dataset`: [`transformers.AutoTokenizer`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoTokenizer) and [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) + - `--trainer`: [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.TrainingArguments) + +The following arguments are required: `--model.name`, `--dataset.tokenizer-name`, `--dataset.path`, `--trainer.output-dir`. + +
+

python transformers_train.py --help

(expand)
+ + ```{eval-rst} + .. literalinclude:: ./transformers_help.txt + ``` +
+ +Of course, this script is a template: you can also edit the script first, as desired. + +### Training GPT-2 on WikiText in One Line + +The following one-line command runs our script end-to-end (installing all dependencies, downloading model and data, training, logging to TensorBoard, etc.). + +Pre-requisites: [uv](https://docs.astral.sh/uv) + ```bash -uv run torchrun.xyz/torchrunx_transformers.py \ - --launcher.hostnames localhost --launcher.workers-per-host 2 \ - --args.output_dir output --args.per-device-train-batch-size 4 --args.report-to tensorboard +uv run https://torchrun.xyz/transformers_train.py \ + --model.name gpt2 --dataset.tokenizer-name gpt2 \ + --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ + --trainer.output_dir output --trainer.per-device-train-batch-size 4 --trainer.report-to tensorboard ``` +We don't need to pass `--launcher` arguments by default. But if you want to do multi-node training (and are not using SLURM), you can also pass e.g. `--launcher.hostnames node1 node2`. + +### Script + +[The [raw source code](https://torchrun.xyz/transformers_train.py) also specifies dependencies at the top of the file — in [PEP 723](https://peps.python.org/pep-0723) format — e.g. for `uv` as above.] + ```{eval-rst} -.. literalinclude:: ./transformers_example.py +.. 
literalinclude:: ./scripts/transformers_train.py + :start-after: # [docs:start-after] ``` diff --git a/docs/source/examples/transformers_help.txt b/docs/source/examples/transformers_help.txt new file mode 100644 index 00000000..6dd4323e --- /dev/null +++ b/docs/source/examples/transformers_help.txt @@ -0,0 +1,478 @@ +usage: transformers_train.py [-h] [OPTIONS] + +╭─ options ──────────────────────────────────────────────────────────────────╮ +│ -h, --help │ +│ show this help message and exit │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ launcher options ─────────────────────────────────────────────────────────╮ +│ Useful for sequential invocations or for specifying arguments via CLI. │ +│ ────────────────────────────────────────────────────────────────────────── │ +│ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.ssh-config-file {None}|STR|PATHLIKE │ +│ (default: None) │ +│ --launcher.backend {None,nccl,gloo,mpi,ucc,auto} │ +│ (default: auto) │ +│ --launcher.timeout INT │ +│ (default: 600) │ +│ --launcher.default-env-vars [STR [STR ...]] │ +│ (default: PATH LD_LIBRARY LIBRARY_PATH 'PYTHON*' 'CUDA*' 'TORCH*' │ +│ 'PYTORCH*' 'NCCL*') │ +│ --launcher.extra-env-vars [STR [STR ...]] │ +│ (default: ) │ +│ --launcher.env-file {None}|STR|PATHLIKE │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ model options ────────────────────────────────────────────────────────────╮ +│ --model.name STR │ +│ (required) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ dataset options ──────────────────────────────────────────────────────────╮ +│ --dataset.tokenizer-name STR │ +│ (required) │ +│ --dataset.path STR │ +│ (required) │ +│ --dataset.name {None}|STR │ +│ (default: None) │ +│ --dataset.split {None}|STR │ +│ (default: 
None) │ +│ --dataset.text-column-name STR │ +│ (default: text) │ +│ --dataset.num-samples {None}|INT │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ trainer options ──────────────────────────────────────────────────────────╮ +│ --trainer.output-dir STR │ +│ The output directory where the model predictions and checkpoints will │ +│ be written. (required) │ +│ --trainer.overwrite-output-dir, --trainer.no-overwrite-output-dir │ +│ Overwrite the content of the output directory. Use this to continue │ +│ training if output_dir points to a checkpoint directory. (default: │ +│ False) │ +│ --trainer.do-train, --trainer.no-do-train │ +│ Whether to run training. (default: False) │ +│ --trainer.do-eval, --trainer.no-do-eval │ +│ Whether to run eval on the dev set. (default: False) │ +│ --trainer.do-predict, --trainer.no-do-predict │ +│ Whether to run predictions on the test set. (default: False) │ +│ --trainer.eval-strategy {NO,STEPS,EPOCH}|STR │ +│ The evaluation strategy to use. (default: no) │ +│ --trainer.prediction-loss-only, --trainer.no-prediction-loss-only │ +│ When performing evaluation and predictions, only returns the loss. │ +│ (default: False) │ +│ --trainer.per-device-train-batch-size INT │ +│ Batch size per GPU/TPU/MPS/NPU core/CPU for training. (default: 8) │ +│ --trainer.per-device-eval-batch-size INT │ +│ Batch size per GPU/TPU/MPS/NPU core/CPU for evaluation. (default: 8) │ +│ --trainer.per-gpu-train-batch-size {None}|INT │ +│ Deprecated, the use of `--per_device_train_batch_size` is preferred. │ +│ Batch size per GPU/TPU core/CPU for training. (default: None) │ +│ --trainer.per-gpu-eval-batch-size {None}|INT │ +│ Deprecated, the use of `--per_device_eval_batch_size` is preferred. │ +│ Batch size per GPU/TPU core/CPU for evaluation. (default: None) │ +│ --trainer.gradient-accumulation-steps INT │ +│ Number of updates steps to accumulate before performing a │ +│ backward/update pass. 
(default: 1) │ +│ --trainer.eval-accumulation-steps {None}|INT │ +│ Number of predictions steps to accumulate before moving the tensors to │ +│ the CPU. (default: None) │ +│ --trainer.eval-delay {None}|FLOAT │ +│ Number of epochs or steps to wait for before the first evaluation can │ +│ be performed, depending on the eval_strategy. (default: 0) │ +│ --trainer.torch-empty-cache-steps {None}|INT │ +│ Number of steps to wait before calling │ +│ `torch..empty_cache()`.This can help avoid CUDA out-of-memory │ +│ errors by lowering peak VRAM usage at a cost of about [10% slower │ +│ performance](https://github.com/huggingface/transformers/issues/31372… │ +│ left unset or set to None, cache will not be emptied. (default: None) │ +│ --trainer.learning-rate FLOAT │ +│ The initial learning rate for AdamW. (default: 5e-05) │ +│ --trainer.weight-decay FLOAT │ +│ Weight decay for AdamW if we apply some. (default: 0.0) │ +│ --trainer.adam-beta1 FLOAT │ +│ Beta1 for AdamW optimizer (default: 0.9) │ +│ --trainer.adam-beta2 FLOAT │ +│ Beta2 for AdamW optimizer (default: 0.999) │ +│ --trainer.adam-epsilon FLOAT │ +│ Epsilon for AdamW optimizer. (default: 1e-08) │ +│ --trainer.max-grad-norm FLOAT │ +│ Max gradient norm. (default: 1.0) │ +│ --trainer.num-train-epochs FLOAT │ +│ Total number of training epochs to perform. (default: 3.0) │ +│ --trainer.max-steps INT │ +│ If > 0: set total number of training steps to perform. Override │ +│ num_train_epochs. (default: -1) │ +│ --trainer.lr-scheduler-type │ +│ {LINEAR,COSINE,COSINE_WITH_RESTARTS,POLYNOMIAL,CONSTANT,CONSTANT_WITH_WAR… │ +│ The scheduler type to use. (default: linear) │ +│ --trainer.lr-scheduler-kwargs {None}|{[STR STR [STR STR ...]]}|STR │ +│ Extra parameters for the lr_scheduler such as {'num_cycles': 1} for │ +│ the cosine with hard restarts. (default: ) │ +│ --trainer.warmup-ratio FLOAT │ +│ Linear warmup over warmup_ratio fraction of total steps. 
(default: │ +│ 0.0) │ +│ --trainer.warmup-steps INT │ +│ Linear warmup over warmup_steps. (default: 0) │ +│ --trainer.log-level {None}|STR │ +│ Logger log level to use on the main node. Possible choices are the log │ +│ levels as strings: 'debug', 'info', 'warning', 'error' and 'critical', │ +│ plus a 'passive' level which doesn't set anything and lets the │ +│ application set the level. Defaults to 'passive'. (default: passive) │ +│ --trainer.log-level-replica {None}|STR │ +│ Logger log level to use on replica nodes. Same choices and defaults as │ +│ ``log_level`` (default: warning) │ +│ --trainer.log-on-each-node, --trainer.no-log-on-each-node │ +│ When doing a multinode distributed training, whether to log once per │ +│ node or just once on the main node. (default: True) │ +│ --trainer.logging-dir {None}|STR │ +│ Tensorboard log dir. (default: None) │ +│ --trainer.logging-strategy {NO,STEPS,EPOCH}|STR │ +│ The logging strategy to use. (default: steps) │ +│ --trainer.logging-first-step, --trainer.no-logging-first-step │ +│ Log the first global_step (default: False) │ +│ --trainer.logging-steps FLOAT │ +│ Log every X updates steps. Should be an integer or a float in range │ +│ `[0,1)`. If smaller than 1, will be interpreted as ratio of total │ +│ training steps. (default: 500) │ +│ --trainer.logging-nan-inf-filter, --trainer.no-logging-nan-inf-filter │ +│ Filter nan and inf losses for logging. (default: True) │ +│ --trainer.save-strategy {NO,STEPS,EPOCH,BEST}|STR │ +│ The checkpoint save strategy to use. (default: steps) │ +│ --trainer.save-steps FLOAT │ +│ Save checkpoint every X updates steps. Should be an integer or a float │ +│ in range `[0,1)`. If smaller than 1, will be interpreted as ratio of │ +│ total training steps. (default: 500) │ +│ --trainer.save-total-limit {None}|INT │ +│ If a value is passed, will limit the total amount of checkpoints. │ +│ Deletes the older checkpoints in `output_dir`. 
When │ +│ `load_best_model_at_end` is enabled, the 'best' checkpoint according │ +│ to `metric_for_best_model` will always be retained in addition to the │ +│ most recent ones. For example, for `save_total_limit=5` and │ +│ `load_best_model_at_end=True`, the four last checkpoints will always │ +│ be retained alongside the best model. When `save_total_limit=1` and │ +│ `load_best_model_at_end=True`, it is possible that two checkpoints are │ +│ saved: the last one and the best one (if they are different). Default │ +│ is unlimited checkpoints (default: None) │ +│ --trainer.save-safetensors {None,True,False} │ +│ Use safetensors saving and loading for state dicts instead of default │ +│ torch.load and torch.save. (default: True) │ +│ --trainer.save-on-each-node, --trainer.no-save-on-each-node │ +│ When doing multi-node distributed training, whether to save models and │ +│ checkpoints on each node, or only on the main one (default: False) │ +│ --trainer.save-only-model, --trainer.no-save-only-model │ +│ When checkpointing, whether to only save the model, or also the │ +│ optimizer, scheduler & rng state.Note that when this is true, you │ +│ won't be able to resume training from checkpoint.This enables you to │ +│ save storage by not storing the optimizer, scheduler & rng state.You │ +│ can only load the model using from_pretrained with this option set to │ +│ True. (default: False) │ +│ --trainer.restore-callback-states-from-checkpoint, │ +│ --trainer.no-restore-callback-states-from-checkpoint │ +│ Whether to restore the callback states from the checkpoint. If `True`, │ +│ will override callbacks passed to the `Trainer` if they exist in the │ +│ checkpoint. (default: False) │ +│ --trainer.no-cuda, --trainer.no-no-cuda │ +│ This argument is deprecated. It will be removed in version 5.0 of 🤗 │ +│ Transformers. (default: False) │ +│ --trainer.use-cpu, --trainer.no-use-cpu │ +│ Whether or not to use cpu. 
If set to False, we will use │ +│ cuda/tpu/mps/npu device if available. (default: False) │ +│ --trainer.use-mps-device, --trainer.no-use-mps-device │ +│ This argument is deprecated. `mps` device will be used if available │ +│ similar to `cuda` device. It will be removed in version 5.0 of 🤗 │ +│ Transformers (default: False) │ +│ --trainer.seed INT │ +│ Random seed that will be set at the beginning of training. (default: │ +│ 42) │ +│ --trainer.data-seed {None}|INT │ +│ Random seed to be used with data samplers. (default: None) │ +│ --trainer.jit-mode-eval, --trainer.no-jit-mode-eval │ +│ Whether or not to use PyTorch jit trace for inference (default: False) │ +│ --trainer.use-ipex, --trainer.no-use-ipex │ +│ Use Intel extension for PyTorch when it is available, installation: │ +│ 'https://github.com/intel/intel-extension-for-pytorch' (default: │ +│ False) │ +│ --trainer.bf16, --trainer.no-bf16 │ +│ Whether to use bf16 (mixed) precision instead of 32-bit. Requires │ +│ Ampere or higher NVIDIA architecture or using CPU (use_cpu) or Ascend │ +│ NPU. This is an experimental API and it may change. (default: False) │ +│ --trainer.fp16, --trainer.no-fp16 │ +│ Whether to use fp16 (mixed) precision instead of 32-bit (default: │ +│ False) │ +│ --trainer.fp16-opt-level STR │ +│ For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', │ +│ and 'O3']. See details at https://nvidia.github.io/apex/amp.html │ +│ (default: O1) │ +│ --trainer.half-precision-backend STR │ +│ The backend to be used for half precision. (default: auto) │ +│ --trainer.bf16-full-eval, --trainer.no-bf16-full-eval │ +│ Whether to use full bfloat16 evaluation instead of 32-bit. This is an │ +│ experimental API and it may change. 
(default: False) │ +│ --trainer.fp16-full-eval, --trainer.no-fp16-full-eval │ +│ Whether to use full float16 evaluation instead of 32-bit (default: │ +│ False) │ +│ --trainer.tf32 {None,True,False} │ +│ Whether to enable tf32 mode, available in Ampere and newer GPU │ +│ architectures. This is an experimental API and it may change. │ +│ (default: None) │ +│ --trainer.local-rank INT │ +│ For distributed training: local_rank (default: -1) │ +│ --trainer.ddp-backend {None}|STR │ +│ The backend to be used for distributed training (default: None) │ +│ --trainer.tpu-num-cores {None}|INT │ +│ TPU: Number of TPU cores (automatically passed by launcher script) │ +│ (default: None) │ +│ --trainer.tpu-metrics-debug, --trainer.no-tpu-metrics-debug │ +│ Deprecated, the use of `--debug tpu_metrics_debug` is preferred. TPU: │ +│ Whether to print debug metrics (default: False) │ +│ --trainer.debug STR|{[{UNDERFLOW_OVERFLOW,TPU_METRICS_DEBUG} [...]]} │ +│ Whether or not to enable debug mode. Current options: │ +│ `underflow_overflow` (Detect underflow and overflow in activations and │ +│ weights), `tpu_metrics_debug` (print debug metrics on TPU). (default: │ +│ '') │ +│ --trainer.dataloader-drop-last, --trainer.no-dataloader-drop-last │ +│ Drop the last incomplete batch if it is not divisible by the batch │ +│ size. (default: False) │ +│ --trainer.eval-steps {None}|FLOAT │ +│ Run an evaluation every X steps. Should be an integer or a float in │ +│ range `[0,1)`. If smaller than 1, will be interpreted as ratio of │ +│ total training steps. (default: None) │ +│ --trainer.dataloader-num-workers INT │ +│ Number of subprocesses to use for data loading (PyTorch only). 0 means │ +│ that the data will be loaded in the main process. (default: 0) │ +│ --trainer.dataloader-prefetch-factor {None}|INT │ +│ Number of batches loaded in advance by each worker. 2 means there will │ +│ be a total of 2 * num_workers batches prefetched across all workers. 
│ +│ Default is 2 for PyTorch < 2.0.0 and otherwise None. (default: None) │ +│ --trainer.past-index INT │ +│ If >=0, uses the corresponding part of the output as the past state │ +│ for next step. (default: -1) │ +│ --trainer.run-name {None}|STR │ +│ An optional descriptor for the run. Notably used for wandb, mlflow and │ +│ comet logging. (default: None) │ +│ --trainer.disable-tqdm {None,True,False} │ +│ Whether or not to disable the tqdm progress bars. (default: None) │ +│ --trainer.remove-unused-columns {None,True,False} │ +│ Remove columns not required by the model when using an nlp.Dataset. │ +│ (default: True) │ +│ --trainer.label-names {None}|{[STR [STR ...]]} │ +│ The list of keys in your dictionary of inputs that correspond to the │ +│ labels. (default: None) │ +│ --trainer.load-best-model-at-end {None,True,False} │ +│ Whether or not to load the best model found during training at the end │ +│ of training. When this option is enabled, the best checkpoint will │ +│ always be saved. See `save_total_limit` for more. (default: False) │ +│ --trainer.metric-for-best-model {None}|STR │ +│ The metric to use to compare two different models. (default: None) │ +│ --trainer.greater-is-better {None,True,False} │ +│ Whether the `metric_for_best_model` should be maximized or not. │ +│ (default: None) │ +│ --trainer.ignore-data-skip, --trainer.no-ignore-data-skip │ +│ When resuming training, whether or not to skip the first epochs and │ +│ batches to get to the same training data. (default: False) │ +│ --trainer.fsdp │ +│ {None}|{[{FULL_SHARD,SHARD_GRAD_OP,NO_SHARD,HYBRID_SHARD,HYBRID_SHARD_ZER… │ +│ [...]]}|STR │ +│ Whether or not to use PyTorch Fully Sharded Data Parallel (FSDP) │ +│ training (in distributed training only). The base option should be │ +│ `full_shard`, `shard_grad_op` or `no_shard` and you can add │ +│ CPU-offload to `full_shard` or `shard_grad_op` like this: full_shard │ +│ offload` or `shard_grad_op offload`. 
You can add auto-wrap to │ +│ `full_shard` or `shard_grad_op` with the same syntax: full_shard │ +│ auto_wrap` or `shard_grad_op auto_wrap`. (default: '') │ +│ --trainer.fsdp-min-num-params INT │ +│ This parameter is deprecated. FSDP's minimum number of parameters for │ +│ Default Auto Wrapping. (useful only when `fsdp` field is passed). │ +│ (default: 0) │ +│ --trainer.fsdp-config {None}|{[STR STR [STR STR ...]]}|STR │ +│ Config to be used with FSDP (Pytorch Fully Sharded Data Parallel). │ +│ The value is either a fsdp json config file (e.g., `fsdp_config.json`) │ +│ or an already loaded json file as `dict`. (default: None) │ +│ --trainer.fsdp-transformer-layer-cls-to-wrap {None}|STR │ +│ This parameter is deprecated. Transformer layer class name │ +│ (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` │ +│ .... (useful only when `fsdp` flag is passed). (default: None) │ +│ --trainer.accelerator-config {None}|{[STR STR [STR STR ...]]}|STR │ +│ Config to be used with the internal Accelerator object initializtion. │ +│ The value is either a accelerator json config file (e.g., │ +│ `accelerator_config.json`) or an already loaded json file as `dict`. │ +│ (default: None) │ +│ --trainer.deepspeed {None}|{[STR STR [STR STR ...]]}|STR │ +│ Enable deepspeed and pass the path to deepspeed json config file (e.g. │ +│ `ds_config.json`) or an already loaded json file as a dict (default: │ +│ None) │ +│ --trainer.label-smoothing-factor FLOAT │ +│ The label smoothing epsilon to apply (zero means no label smoothing). │ +│ (default: 0.0) │ +│ --trainer.optim │ +│ {ADAMW_HF,ADAMW_TORCH,ADAMW_TORCH_FUSED,ADAMW_TORCH_XLA,ADAMW_TORCH_NPU_F… │ +│ The optimizer to use. (default: adamw_torch) │ +│ --trainer.optim-args {None}|STR │ +│ Optional arguments to supply to optimizer. (default: None) │ +│ --trainer.adafactor, --trainer.no-adafactor │ +│ Whether or not to replace AdamW by Adafactor. 
(default: False) │ +│ --trainer.group-by-length, --trainer.no-group-by-length │ +│ Whether or not to group samples of roughly the same length together │ +│ when batching. (default: False) │ +│ --trainer.length-column-name {None}|STR │ +│ Column name with precomputed lengths to use when grouping by length. │ +│ (default: length) │ +│ --trainer.report-to {None}|STR|{[STR [STR ...]]} │ +│ The list of integrations to report the results and logs to. (default: │ +│ None) │ +│ --trainer.ddp-find-unused-parameters {None,True,False} │ +│ When using distributed training, the value of the flag │ +│ `find_unused_parameters` passed to `DistributedDataParallel`. │ +│ (default: None) │ +│ --trainer.ddp-bucket-cap-mb {None}|INT │ +│ When using distributed training, the value of the flag `bucket_cap_mb` │ +│ passed to `DistributedDataParallel`. (default: None) │ +│ --trainer.ddp-broadcast-buffers {None,True,False} │ +│ When using distributed training, the value of the flag │ +│ `broadcast_buffers` passed to `DistributedDataParallel`. (default: │ +│ None) │ +│ --trainer.dataloader-pin-memory, --trainer.no-dataloader-pin-memory │ +│ Whether or not to pin memory for DataLoader. (default: True) │ +│ --trainer.dataloader-persistent-workers, │ +│ --trainer.no-dataloader-persistent-workers │ +│ If True, the data loader will not shut down the worker processes after │ +│ a dataset has been consumed once. This allows to maintain the workers │ +│ Dataset instances alive. Can potentially speed up training, but will │ +│ increase RAM usage. (default: False) │ +│ --trainer.skip-memory-metrics, --trainer.no-skip-memory-metrics │ +│ Whether or not to skip adding of memory profiler reports to metrics. │ +│ (default: True) │ +│ --trainer.use-legacy-prediction-loop, │ +│ --trainer.no-use-legacy-prediction-loop │ +│ Whether or not to use the legacy prediction_loop in the Trainer. 
│ +│ (default: False) │ +│ --trainer.push-to-hub, --trainer.no-push-to-hub │ +│ Whether or not to upload the trained model to the model hub after │ +│ training. (default: False) │ +│ --trainer.resume-from-checkpoint {None}|STR │ +│ The path to a folder with a valid checkpoint for your model. (default: │ +│ None) │ +│ --trainer.hub-model-id {None}|STR │ +│ The name of the repository to keep in sync with the local │ +│ `output_dir`. (default: None) │ +│ --trainer.hub-strategy {END,EVERY_SAVE,CHECKPOINT,ALL_CHECKPOINTS}|STR │ +│ The hub strategy to use when `--push_to_hub` is activated. (default: │ +│ every_save) │ +│ --trainer.hub-token {None}|STR │ +│ The token to use to push to the Model Hub. (default: None) │ +│ --trainer.hub-private-repo {None,True,False} │ +│ Whether to make the repo private. If `None` (default), the repo will │ +│ be public unless the organization's default is private. This value is │ +│ ignored if the repo already exists. (default: None) │ +│ --trainer.hub-always-push, --trainer.no-hub-always-push │ +│ Unless `True`, the Trainer will skip pushes if the previous one wasn't │ +│ finished yet. (default: False) │ +│ --trainer.gradient-checkpointing, --trainer.no-gradient-checkpointing │ +│ If True, use gradient checkpointing to save memory at the expense of │ +│ slower backward pass. (default: False) │ +│ --trainer.gradient-checkpointing-kwargs {None}|{[STR STR [STR STR │ +│ ...]]}|STR │ +│ Gradient checkpointing key word arguments such as `use_reentrant`. │ +│ Will be passed to `torch.utils.checkpoint.checkpoint` through │ +│ `model.gradient_checkpointing_enable`. (default: None) │ +│ --trainer.include-inputs-for-metrics, │ +│ --trainer.no-include-inputs-for-metrics │ +│ This argument is deprecated and will be removed in version 5 of 🤗 │ +│ Transformers. Use `include_for_metrics` instead. 
(default: False) │ +│ --trainer.include-for-metrics [STR [STR ...]] │ +│ List of strings to specify additional data to include in the │ +│ `compute_metrics` function.Options: 'inputs', 'loss'. (default: ) │ +│ --trainer.eval-do-concat-batches, --trainer.no-eval-do-concat-batches │ +│ Whether to recursively concat inputs/losses/labels/predictions across │ +│ batches. If `False`, will instead store them as lists, with each batch │ +│ kept separate. (default: True) │ +│ --trainer.fp16-backend STR │ +│ Deprecated. Use half_precision_backend instead (default: auto) │ +│ --trainer.evaluation-strategy {None,NO,STEPS,EPOCH}|STR │ +│ Deprecated. Use `eval_strategy` instead (default: None) │ +│ --trainer.push-to-hub-model-id {None}|STR │ +│ The name of the repository to which push the `Trainer`. (default: │ +│ None) │ +│ --trainer.push-to-hub-organization {None}|STR │ +│ The name of the organization in with to which push the `Trainer`. │ +│ (default: None) │ +│ --trainer.push-to-hub-token {None}|STR │ +│ The token to use to push to the Model Hub. (default: None) │ +│ --trainer.mp-parameters STR │ +│ Used by the SageMaker launcher to send mp-specific args. Ignored in │ +│ Trainer (default: '') │ +│ --trainer.auto-find-batch-size, --trainer.no-auto-find-batch-size │ +│ Whether to automatically decrease the batch size in half and rerun the │ +│ training loop again each time a CUDA Out-of-Memory was reached │ +│ (default: False) │ +│ --trainer.full-determinism, --trainer.no-full-determinism │ +│ Whether to call enable_full_determinism instead of set_seed for │ +│ reproducibility in distributed training. Important: this will │ +│ negatively impact the performance, so only use it for debugging. │ +│ (default: False) │ +│ --trainer.torchdynamo {None}|STR │ +│ This argument is deprecated, use `--torch_compile_backend` instead. │ +│ (default: None) │ +│ --trainer.ray-scope {None}|STR │ +│ The scope to use when doing hyperparameter search with Ray. 
By │ +│ default, `"last"` will be used. Ray will then use the last checkpoint │ +│ of all trials, compare those, and select the best one. However, other │ +│ options are also available. See the Ray documentation │ +│ (https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.E… │ +│ for more options. (default: last) │ +│ --trainer.ddp-timeout {None}|INT │ +│ Overrides the default timeout for distributed training (value should │ +│ be given in seconds). (default: 1800) │ +│ --trainer.torch-compile, --trainer.no-torch-compile │ +│ If set to `True`, the model will be wrapped in `torch.compile`. │ +│ (default: False) │ +│ --trainer.torch-compile-backend {None}|STR │ +│ Which backend to use with `torch.compile`, passing one will trigger a │ +│ model compilation. (default: None) │ +│ --trainer.torch-compile-mode {None}|STR │ +│ Which mode to use with `torch.compile`, passing one will trigger a │ +│ model compilation. (default: None) │ +│ --trainer.dispatch-batches {None,True,False} │ +│ Deprecated. Pass {'dispatch_batches':VALUE} to `accelerator_config`. │ +│ (default: None) │ +│ --trainer.split-batches {None,True,False} │ +│ Deprecated. Pass {'split_batches':True} to `accelerator_config`. │ +│ (default: None) │ +│ --trainer.include-tokens-per-second {None,True,False} │ +│ If set to `True`, the speed metrics will include `tgs` (tokens per │ +│ second per device). (default: False) │ +│ --trainer.include-num-input-tokens-seen {None,True,False} │ +│ If set to `True`, will track the number of input tokens seen │ +│ throughout training. (May be slower in distributed training) (default: │ +│ False) │ +│ --trainer.neftune-noise-alpha {None}|FLOAT │ +│ Activates neftune noise embeddings into the model. NEFTune has been │ +│ proven to drastically improve model performances for instrcution │ +│ fine-tuning. Check out the original paper here: │ +│ https://arxiv.org/abs/2310.05914 and the original code here: │ +│ https://github.com/neelsjain/NEFTune. 
Only supported for │ +│ `PreTrainedModel` and `PeftModel` classes. (default: None) │ +│ --trainer.optim-target-modules {None}|STR|{[STR [STR ...]]} │ +│ Target modules for the optimizer defined in the `optim` argument. Only │ +│ used for the GaLore optimizer at the moment. (default: None) │ +│ --trainer.batch-eval-metrics, --trainer.no-batch-eval-metrics │ +│ Break eval metrics calculation into batches to save memory. (default: │ +│ False) │ +│ --trainer.eval-on-start, --trainer.no-eval-on-start │ +│ Whether to run through the entire `evaluation` step at the very │ +│ beginning of training as a sanity check. (default: False) │ +│ --trainer.use-liger-kernel {None,True,False} │ +│ Whether or not to enable the Liger Kernel for model training. │ +│ (default: False) │ +│ --trainer.eval-use-gather-object {None,True,False} │ +│ Whether to run recursively gather object in a nested │ +│ list/tuple/dictionary of objects from all devices. (default: False) │ +│ --trainer.average-tokens-across-devices {None,True,False} │ +│ Whether or not to average tokens across devices. If enabled, will use │ +│ all_reduce to synchronize num_tokens_in_batch for precise loss │ +│ calculation. Reference: │ +│ https://github.com/huggingface/transformers/issues/34242 (default: │ +│ False) │ +╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/docs/source/how_it_works.md b/docs/source/how_it_works.md index 4062e87d..7bf35cb2 100644 --- a/docs/source/how_it_works.md +++ b/docs/source/how_it_works.md @@ -1,4 +1,4 @@ -# How it works +# How It Works If you want to (e.g.) train your model on several machines with **N** GPUs each, you should run your training function in **N** parallel processes on each machine. During training, each of these processes runs the same training code (i.e. your function) and communicate with each other (e.g. 
to synchronize gradients) using a [distributed process group](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group). From ad9d446d36ef76d5fc52c99586953e791ab0bc6e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:03:30 -0500 Subject: [PATCH 065/141] update docs and deps --- docs/conf.py | 75 +- docs/linkcode_github.py | 62 + docs/source/examples/accelerate.md | 2 +- docs/source/examples/deepspeed.md | 2 +- docs/source/examples/lightning.md | 2 +- ...elerate_example.py => accelerate_train.py} | 0 ...eepspeed_example.py => deepspeed_train.py} | 0 .../lightning_train.py} | 0 .../{ => scripts}/transformers_help.txt | 0 docs/source/examples/transformers.md | 2 +- pyproject.toml | 2 +- uv.lock | 1118 +++++++++-------- 12 files changed, 662 insertions(+), 603 deletions(-) create mode 100644 docs/linkcode_github.py rename docs/source/examples/scripts/{accelerate_example.py => accelerate_train.py} (100%) rename docs/source/examples/scripts/{deepspeed_example.py => deepspeed_train.py} (100%) rename docs/source/examples/{lightning_example.py => scripts/lightning_train.py} (100%) rename docs/source/examples/{ => scripts}/transformers_help.txt (100%) diff --git a/docs/conf.py b/docs/conf.py index eace1695..04a756fb 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,12 +1,17 @@ """Configuration file for the Sphinx documentation builder.""" +from glob import glob +import shutil + +shutil.copyfile("../README.md", "source/README.md") + project = "torchrunx" github_username = "apoorvkh" github_repository = "torchrunx" html_theme = "furo" language = "en" -html_extra_path = ["source/examples/scripts"] +html_extra_path = list(glob("source/examples/scripts/*.py")) extensions = [ "autodoc2", @@ -27,69 +32,5 @@ "numpy": ("https://numpy.org/doc/stable", None), } -## Copy README.md to docs/source/ -import shutil -shutil.copyfile("../README.md", "source/README.md") -## End of "Copy README.md to docs/source/" - -## sphinx.ext.linkcode 
configuration -# Link code to Github source -# From: https://github.com/scikit-learn/scikit-learn/blob/main/doc/sphinxext/github_link.py - -import inspect -import os -import subprocess -import sys -from operator import attrgetter - -package = project - -try: - revision = ( - subprocess.check_output("git rev-parse --short HEAD".split()).strip().decode("utf-8") - ) -except (subprocess.CalledProcessError, OSError): - print("Failed to execute git to get revision") - revision = None - -url_fmt = ( - f"https://github.com/{github_username}/{github_repository}/" - "blob/{revision}/src/{package}/{path}#L{lineno}" -) - -def linkcode_resolve(domain, info): - if revision is None: - return - if domain not in ("py", "pyx"): - return - if not info.get("module") or not info.get("fullname"): - return - - class_name = info["fullname"].split(".")[0] - module = __import__(info["module"], fromlist=[class_name]) - obj = attrgetter(info["fullname"])(module) - - # Unwrap the object to get the correct source - # file in case that is wrapped by a decorator - obj = inspect.unwrap(obj) - - try: - fn = inspect.getsourcefile(obj) - except Exception: - fn = None - if not fn: - try: - fn = inspect.getsourcefile(sys.modules[obj.__module__]) - except Exception: - fn = None - if not fn: - return - - fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__)) - try: - lineno = inspect.getsourcelines(obj)[1] - except Exception: - lineno = "" - return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) - -## End of "sphinx.ext.linkcode configuration" +from docs.linkcode_github import generate_linkcode_resolve_fn +linkcode_resolve = generate_linkcode_resolve_fn(project, github_username, github_repository) diff --git a/docs/linkcode_github.py b/docs/linkcode_github.py new file mode 100644 index 00000000..cd385b8f --- /dev/null +++ b/docs/linkcode_github.py @@ -0,0 +1,62 @@ +## sphinx.ext.linkcode configuration +# Link code to Github source +# From: 
https://github.com/scikit-learn/scikit-learn/blob/main/doc/sphinxext/github_link.py + +import inspect +import os +import subprocess +import sys +from operator import attrgetter + + +def generate_linkcode_resolve_fn(package, github_username, github_repository): + + try: + revision = ( + subprocess.check_output("git rev-parse --short HEAD".split()).strip().decode("utf-8") + ) + except (subprocess.CalledProcessError, OSError): + print("Failed to execute git to get revision") + revision = None + + url_fmt = ( + f"https://github.com/{github_username}/{github_repository}/" + "blob/{revision}/src/{package}/{path}#L{lineno}" + ) + + def linkcode_resolve(domain, info): + if revision is None: + return + if domain not in ("py", "pyx"): + return + if not info.get("module") or not info.get("fullname"): + return + + class_name = info["fullname"].split(".")[0] + module = __import__(info["module"], fromlist=[class_name]) + obj = attrgetter(info["fullname"])(module) + + # Unwrap the object to get the correct source + # file in case that is wrapped by a decorator + obj = inspect.unwrap(obj) + + try: + fn = inspect.getsourcefile(obj) + except Exception: + fn = None + if not fn: + try: + fn = inspect.getsourcefile(sys.modules[obj.__module__]) + except Exception: + fn = None + if not fn: + return + + fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__)) + try: + lineno = inspect.getsourcelines(obj)[1] + except Exception: + lineno = "" + return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) + + return linkcode_resolve diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index 33ec5ba5..6c7b8e39 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -1,5 +1,5 @@ # Accelerate ```{eval-rst} -.. literalinclude:: ./scripts/accelerate_example.py +.. 
literalinclude:: ./scripts/accelerate_train.py ``` diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index f3b21a3f..15cf1d7a 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -1,6 +1,6 @@ # DeepSpeed ```{eval-rst} -.. literalinclude:: ./scripts/deepspeed_example.py +.. literalinclude:: ./scripts/deepspeed_train.py .. literalinclude:: ./scripts/deepspeed_config.json ``` diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index 95786b49..b4d5d925 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -1,5 +1,5 @@ # Pytorch Lightning ```{eval-rst} -.. literalinclude:: ./scripts/lightning_example.py +.. literalinclude:: ./scripts/lightning_train.py ``` diff --git a/docs/source/examples/scripts/accelerate_example.py b/docs/source/examples/scripts/accelerate_train.py similarity index 100% rename from docs/source/examples/scripts/accelerate_example.py rename to docs/source/examples/scripts/accelerate_train.py diff --git a/docs/source/examples/scripts/deepspeed_example.py b/docs/source/examples/scripts/deepspeed_train.py similarity index 100% rename from docs/source/examples/scripts/deepspeed_example.py rename to docs/source/examples/scripts/deepspeed_train.py diff --git a/docs/source/examples/lightning_example.py b/docs/source/examples/scripts/lightning_train.py similarity index 100% rename from docs/source/examples/lightning_example.py rename to docs/source/examples/scripts/lightning_train.py diff --git a/docs/source/examples/transformers_help.txt b/docs/source/examples/scripts/transformers_help.txt similarity index 100% rename from docs/source/examples/transformers_help.txt rename to docs/source/examples/scripts/transformers_help.txt diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 8196b4d6..3cafb8ef 100644 --- a/docs/source/examples/transformers.md +++ 
b/docs/source/examples/transformers.md @@ -14,7 +14,7 @@ The following arguments are required: `--model.name`, `--dataset.tokenizer-name`

python transformers_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./transformers_help.txt + .. literalinclude:: ./scripts/transformers_help.txt ``` diff --git a/pyproject.toml b/pyproject.toml index 09bd0ae0..dc3a877d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ dependencies = [ "numpy>=1.20", ] [dependency-groups] -dev = ["ruff", "pyright", "pytest", "build", "twine"] +dev = ["ruff", "pyright[nodejs]", "pytest", "build", "twine"] dev-extras = ["submitit", "transformers"] docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-autodoc2==0.5.0", "sphinx-toolbox==3.8.1"] diff --git a/uv.lock b/uv.lock index 4c6efd26..c9287d45 100644 --- a/uv.lock +++ b/uv.lock @@ -1,9 +1,10 @@ version = 1 requires-python = ">=3.9" resolution-markers = [ - "python_full_version < '3.12'", - "python_full_version == '3.12.*'", "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version >= '3.10' and python_full_version < '3.12'", + "python_full_version < '3.10'", ] [[package]] @@ -45,14 +46,14 @@ wheels = [ [[package]] name = "astroid" -version = "3.3.5" +version = "3.3.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/38/1e/326fb1d3d83a3bb77c9f9be29d31f2901e35acb94b0605c3f2e5085047f9/astroid-3.3.5.tar.gz", hash = "sha256:5cfc40ae9f68311075d27ef68a4841bdc5cc7f6cf86671b49f00607d30188e2d", size = 397229 } +sdist = { url = "https://files.pythonhosted.org/packages/80/c5/5c83c48bbf547f3dd8b587529db7cf5a265a3368b33e85e76af8ff6061d3/astroid-3.3.8.tar.gz", hash = "sha256:a88c7994f914a4ea8572fac479459f4955eeccc877be3f2d959a33273b0cf40b", size = 398196 } wheels = [ - { url = "https://files.pythonhosted.org/packages/41/30/624365383fa4a40329c0f0bbbc151abc4a64e30dfc110fc8f6e2afcd02bb/astroid-3.3.5-py3-none-any.whl", hash = "sha256:a9d1c946ada25098d790e079ba2a1b112157278f3fb7e718ae6a9252f5835dc8", size = 
274586 }, + { url = "https://files.pythonhosted.org/packages/07/28/0bc8a17d6cd4cc3c79ae41b7105a2b9a327c110e5ddd37a8a27b29a5c8a2/astroid-3.3.8-py3-none-any.whl", hash = "sha256:187ccc0c248bfbba564826c26f070494f7bc964fd286b6d9fff4420e55de828c", size = 275153 }, ] [[package]] @@ -69,11 +70,11 @@ wheels = [ [[package]] name = "babel" -version = "2.16.0" +version = "2.17.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", size = 9348104 } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537 }, ] [[package]] @@ -87,48 +88,47 @@ wheels = [ [[package]] name = "bcrypt" -version = "4.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/7e/d95e7d96d4828e965891af92e43b52a4cd3395dc1c1ef4ee62748d0471d0/bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221", size = 24294 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a9/81/4e8f5bc0cd947e91fb720e1737371922854da47a94bc9630454e7b2845f8/bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", 
hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb", size = 471568 }, - { url = "https://files.pythonhosted.org/packages/05/d2/1be1e16aedec04bcf8d0156e01b987d16a2063d38e64c3f28030a3427d61/bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00", size = 277372 }, - { url = "https://files.pythonhosted.org/packages/e3/96/7a654027638ad9b7589effb6db77eb63eba64319dfeaf9c0f4ca953e5f76/bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d", size = 273488 }, - { url = "https://files.pythonhosted.org/packages/46/54/dc7b58abeb4a3d95bab653405935e27ba32f21b812d8ff38f271fb6f7f55/bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291", size = 277759 }, - { url = "https://files.pythonhosted.org/packages/ac/be/da233c5f11fce3f8adec05e8e532b299b64833cc962f49331cdd0e614fa9/bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328", size = 273796 }, - { url = "https://files.pythonhosted.org/packages/b0/b8/8b4add88d55a263cf1c6b8cf66c735280954a04223fcd2880120cc767ac3/bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7", size = 311082 }, - { url = "https://files.pythonhosted.org/packages/7b/76/2aa660679abbdc7f8ee961552e4bb6415a81b303e55e9374533f22770203/bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399", size = 305912 }, - { url = "https://files.pythonhosted.org/packages/00/03/2af7c45034aba6002d4f2b728c1a385676b4eab7d764410e34fd768009f2/bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060", size = 325185 }, - { url = "https://files.pythonhosted.org/packages/dc/5d/6843443ce4ab3af40bddb6c7c085ed4a8418b3396f7a17e60e6d9888416c/bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7", size = 335188 }, - { url = "https://files.pythonhosted.org/packages/cb/4c/ff8ca83d816052fba36def1d24e97d9a85739b9bbf428c0d0ecd296a07c8/bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458", size = 156481 }, - { url = "https://files.pythonhosted.org/packages/65/f1/e09626c88a56cda488810fb29d5035f1662873777ed337880856b9d204ae/bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5", size = 151336 }, - { url = "https://files.pythonhosted.org/packages/96/86/8c6a84daed4dd878fbab094400c9174c43d9b838ace077a2f8ee8bc3ae12/bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841", size = 472414 }, - { url = "https://files.pythonhosted.org/packages/f6/05/e394515f4e23c17662e5aeb4d1859b11dc651be01a3bd03c2e919a155901/bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68", size = 277599 }, - { url = "https://files.pythonhosted.org/packages/4b/3b/ad784eac415937c53da48983756105d267b91e56aa53ba8a1b2014b8d930/bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe", size = 273491 }, - { url = "https://files.pythonhosted.org/packages/cc/14/b9ff8e0218bee95e517b70e91130effb4511e8827ac1ab00b4e30943a3f6/bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2", size = 277934 }, - { url = 
"https://files.pythonhosted.org/packages/3e/d0/31938bb697600a04864246acde4918c4190a938f891fd11883eaaf41327a/bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c", size = 273804 }, - { url = "https://files.pythonhosted.org/packages/e7/c3/dae866739989e3f04ae304e1201932571708cb292a28b2f1b93283e2dcd8/bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae", size = 311275 }, - { url = "https://files.pythonhosted.org/packages/5d/2c/019bc2c63c6125ddf0483ee7d914a405860327767d437913942b476e9c9b/bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d", size = 306355 }, - { url = "https://files.pythonhosted.org/packages/75/fe/9e137727f122bbe29771d56afbf4e0dbc85968caa8957806f86404a5bfe1/bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e", size = 325381 }, - { url = "https://files.pythonhosted.org/packages/1a/d4/586b9c18a327561ea4cd336ff4586cca1a7aa0f5ee04e23a8a8bb9ca64f1/bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8", size = 335685 }, - { url = "https://files.pythonhosted.org/packages/24/55/1a7127faf4576138bb278b91e9c75307490178979d69c8e6e273f74b974f/bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34", size = 155857 }, - { url = "https://files.pythonhosted.org/packages/1c/2a/c74052e54162ec639266d91539cca7cbf3d1d3b8b36afbfeaee0ea6a1702/bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9", size = 151717 }, - { url = 
"https://files.pythonhosted.org/packages/09/97/01026e7b1b7f8aeb41514408eca1137c0f8aef9938335e3bc713f82c282e/bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a", size = 275924 }, - { url = "https://files.pythonhosted.org/packages/ca/46/03eb26ea3e9c12ca18d1f3bf06199f7d72ce52e68f2a1ebcfd8acff9c472/bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db", size = 272242 }, - { url = "https://files.pythonhosted.org/packages/73/5a/811c3c7af3be99888f39ee8845ddf849d2a03a83049d63ece5dfb4612f4d/bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170", size = 278107 }, - { url = "https://files.pythonhosted.org/packages/8b/79/76a139d1b9f11aa4afcb7ceb882d2e81003667681711f2fe8a302c4c10ca/bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184", size = 274081 }, +version = "4.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/56/8c/dd696962612e4cd83c40a9e6b3db77bfe65a830f4b9af44098708584686c/bcrypt-4.2.1.tar.gz", hash = "sha256:6765386e3ab87f569b276988742039baab087b2cdb01e809d74e74503c2faafe", size = 24427 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/ca/e17b08c523adb93d5f07a226b2bd45a7c6e96b359e31c1e99f9db58cb8c3/bcrypt-4.2.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:1340411a0894b7d3ef562fb233e4b6ed58add185228650942bdc885362f32c17", size = 489982 }, + { url = "https://files.pythonhosted.org/packages/6a/be/e7c6e0fd6087ee8fc6d77d8d9e817e9339d879737509019b9a9012a1d96f/bcrypt-4.2.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ee315739bc8387aa36ff127afc99120ee452924e0df517a8f3e4c0187a0f5f", size = 273108 }, + { url = 
"https://files.pythonhosted.org/packages/d6/53/ac084b7d985aee1a5f2b086d501f550862596dbf73220663b8c17427e7f2/bcrypt-4.2.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dbd0747208912b1e4ce730c6725cb56c07ac734b3629b60d4398f082ea718ad", size = 278733 }, + { url = "https://files.pythonhosted.org/packages/8e/ab/b8710a3d6231c587e575ead0b1c45bb99f5454f9f579c9d7312c17b069cc/bcrypt-4.2.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:aaa2e285be097050dba798d537b6efd9b698aa88eef52ec98d23dcd6d7cf6fea", size = 273856 }, + { url = "https://files.pythonhosted.org/packages/9d/e5/2fd1ea6395358ffdfd4afe370d5b52f71408f618f781772a48971ef3b92b/bcrypt-4.2.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:76d3e352b32f4eeb34703370e370997065d28a561e4a18afe4fef07249cb4396", size = 279067 }, + { url = "https://files.pythonhosted.org/packages/4e/ef/f2cb7a0f7e1ed800a604f8ab256fb0afcf03c1540ad94ff771ce31e794aa/bcrypt-4.2.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:b7703ede632dc945ed1172d6f24e9f30f27b1b1a067f32f68bf169c5f08d0425", size = 306851 }, + { url = "https://files.pythonhosted.org/packages/de/cb/578b0023c6a5ca16a177b9044ba6bd6032277bd3ef020fb863eccd22e49b/bcrypt-4.2.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:89df2aea2c43be1e1fa066df5f86c8ce822ab70a30e4c210968669565c0f4685", size = 310793 }, + { url = "https://files.pythonhosted.org/packages/98/bc/9d501ee9d754f63d4b1086b64756c284facc3696de9b556c146279a124a5/bcrypt-4.2.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:04e56e3fe8308a88b77e0afd20bec516f74aecf391cdd6e374f15cbed32783d6", size = 320957 }, + { url = "https://files.pythonhosted.org/packages/a1/25/2ec4ce5740abc43182bfc064b9acbbf5a493991246985e8b2bfe231ead64/bcrypt-4.2.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cfdf3d7530c790432046c40cda41dfee8c83e29482e6a604f8930b9930e94139", size = 339958 }, + { url = 
"https://files.pythonhosted.org/packages/6d/64/fd67788f64817727897d31e9cdeeeba3941eaad8540733c05c7eac4aa998/bcrypt-4.2.1-cp37-abi3-win32.whl", hash = "sha256:adadd36274510a01f33e6dc08f5824b97c9580583bd4487c564fc4617b328005", size = 160912 }, + { url = "https://files.pythonhosted.org/packages/00/8f/fe834eaa54abbd7cab8607e5020fa3a0557e929555b9e4ca404b4adaab06/bcrypt-4.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:8c458cd103e6c5d1d85cf600e546a639f234964d0228909d8f8dbeebff82d526", size = 152981 }, + { url = "https://files.pythonhosted.org/packages/4a/57/23b46933206daf5384b5397d9878746d2249fe9d45efaa8e1467c87d3048/bcrypt-4.2.1-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:8ad2f4528cbf0febe80e5a3a57d7a74e6635e41af1ea5675282a33d769fba413", size = 489842 }, + { url = "https://files.pythonhosted.org/packages/fd/28/3ea8a39ddd4938b6c6b6136816d72ba5e659e2d82b53d843c8c53455ac4d/bcrypt-4.2.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:909faa1027900f2252a9ca5dfebd25fc0ef1417943824783d1c8418dd7d6df4a", size = 272500 }, + { url = "https://files.pythonhosted.org/packages/77/7f/b43622999f5d4de06237a195ac5501ac83516adf571b907228cd14bac8fe/bcrypt-4.2.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cde78d385d5e93ece5479a0a87f73cd6fa26b171c786a884f955e165032b262c", size = 278368 }, + { url = "https://files.pythonhosted.org/packages/50/68/f2e3959014b4d8874c747e6e171d46d3e63a3a39aaca8417a8d837eda0a8/bcrypt-4.2.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:533e7f3bcf2f07caee7ad98124fab7499cb3333ba2274f7a36cf1daee7409d99", size = 273335 }, + { url = "https://files.pythonhosted.org/packages/d6/c3/4b4bad4da852924427c651589d464ad1aa624f94dd904ddda8493b0a35e5/bcrypt-4.2.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:687cf30e6681eeda39548a93ce9bfbb300e48b4d445a43db4298d2474d2a1e54", size = 278614 }, + { url = 
"https://files.pythonhosted.org/packages/6e/5a/ee107961e84c41af2ac201d0460f962b6622ff391255ffd46429e9e09dc1/bcrypt-4.2.1-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:041fa0155c9004eb98a232d54da05c0b41d4b8e66b6fc3cb71b4b3f6144ba837", size = 306464 }, + { url = "https://files.pythonhosted.org/packages/5c/72/916e14fa12d2b1d1fc6c26ea195337419da6dd23d0bf53ac61ef3739e5c5/bcrypt-4.2.1-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f85b1ffa09240c89aa2e1ae9f3b1c687104f7b2b9d2098da4e923f1b7082d331", size = 310674 }, + { url = "https://files.pythonhosted.org/packages/97/92/3dc76d8bfa23300591eec248e950f85bd78eb608c96bd4747ce4cc06acdb/bcrypt-4.2.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c6f5fa3775966cca251848d4d5393ab016b3afed251163c1436fefdec3b02c84", size = 320577 }, + { url = "https://files.pythonhosted.org/packages/5d/ab/a6c0da5c2cf86600f74402a72b06dfe365e1a1d30783b1bbeec460fd57d1/bcrypt-4.2.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:807261df60a8b1ccd13e6599c779014a362ae4e795f5c59747f60208daddd96d", size = 339836 }, + { url = "https://files.pythonhosted.org/packages/b4/b4/e75b6e9a72a030a04362034022ebe317c5b735d04db6ad79237101ae4a5c/bcrypt-4.2.1-cp39-abi3-win32.whl", hash = "sha256:b588af02b89d9fad33e5f98f7838bf590d6d692df7153647724a7f20c186f6bf", size = 160911 }, + { url = "https://files.pythonhosted.org/packages/76/b9/d51d34e6cd6d887adddb28a8680a1d34235cc45b9d6e238ce39b98199ca0/bcrypt-4.2.1-cp39-abi3-win_amd64.whl", hash = "sha256:e84e0e6f8e40a242b11bce56c313edc2be121cec3e0ec2d76fce01f6af33c07c", size = 153078 }, + { url = "https://files.pythonhosted.org/packages/4e/6e/7193067042de23af3d71882f898c8c0bd2b18e6ee44a4f76e395dfadb5a8/bcrypt-4.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76132c176a6d9953cdc83c296aeaed65e1a708485fd55abf163e0d9f8f16ce0e", size = 270069 }, + { url = 
"https://files.pythonhosted.org/packages/3b/05/2546085c6dc07a45627460a39e6291b82382b434fff2bd0167ff3bc31eb1/bcrypt-4.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e158009a54c4c8bc91d5e0da80920d048f918c61a581f0a63e4e93bb556d362f", size = 274652 }, ] [[package]] name = "beautifulsoup4" -version = "4.12.3" +version = "4.13.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "soupsieve" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b3/ca/824b1195773ce6166d388573fc106ce56d4a805bd7427b624e063596ec58/beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", size = 581181 } +sdist = { url = "https://files.pythonhosted.org/packages/f0/3c/adaf39ce1fb4afdd21b611e3d530b183bb7759c9b673d60db0e347fd4439/beautifulsoup4-4.13.3.tar.gz", hash = "sha256:1bd32405dacc920b42b83ba01644747ed77456a65760e285fbc47633ceddaf8b", size = 619516 } wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, + { url = "https://files.pythonhosted.org/packages/f9/49/6abb616eb3cbab6a7cca303dc02fdf3836de2e0b834bf966a7f5271a34d8/beautifulsoup4-4.13.3-py3-none-any.whl", hash = "sha256:99045d7d3f08f91f0d656bc9b7efbae189426cd913d830294a15eefa0ea4df16", size = 186015 }, ] [[package]] @@ -149,15 +149,15 @@ wheels = [ [[package]] name = "cachecontrol" -version = "0.14.1" +version = "0.14.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "msgpack" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d2/23/db12e0b6b241e33f77f7cce01a06b4cc6f8071728656cc0ea262d2a14dad/cachecontrol-0.14.1.tar.gz", hash = "sha256:06ef916a1e4eb7dba9948cdfc9c76e749db2e02104a9a1277e8b642591a0f717", size = 28928 } +sdist 
= { url = "https://files.pythonhosted.org/packages/b7/a4/3390ac4dfa1773f661c8780368018230e8207ec4fd3800d2c0c3adee4456/cachecontrol-0.14.2.tar.gz", hash = "sha256:7d47d19f866409b98ff6025b6a0fca8e4c791fb31abbd95f622093894ce903a2", size = 28832 } wheels = [ - { url = "https://files.pythonhosted.org/packages/f1/aa/481eb52af52aae093c61c181f2308779973ffd6f0f5f6c0881b2138f3087/cachecontrol-0.14.1-py3-none-any.whl", hash = "sha256:65e3abd62b06382ce3894df60dde9e0deb92aeb734724f68fa4f3b91e97206b9", size = 22085 }, + { url = "https://files.pythonhosted.org/packages/c8/63/baffb44ca6876e7b5fc8fe17b24a7c07bf479d604a592182db9af26ea366/cachecontrol-0.14.2-py3-none-any.whl", hash = "sha256:ebad2091bf12d0d200dfc2464330db638c5deb41d546f6d7aca079e87290f3b0", size = 21780 }, ] [package.optional-dependencies] @@ -167,11 +167,11 @@ filecache = [ [[package]] name = "certifi" -version = "2024.8.30" +version = "2025.1.31" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 } +sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } wheels = [ - { url = "https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 }, + { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, ] [[package]] @@ 
-245,95 +245,85 @@ wheels = [ [[package]] name = "charset-normalizer" -version = "3.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/69/8b/825cc84cf13a28bfbcba7c416ec22bf85a9584971be15b21dd8300c65b7f/charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", size = 196363 }, - { url = "https://files.pythonhosted.org/packages/23/81/d7eef6a99e42c77f444fdd7bc894b0ceca6c3a95c51239e74a722039521c/charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", size = 125639 }, - { url = "https://files.pythonhosted.org/packages/21/67/b4564d81f48042f520c948abac7079356e94b30cb8ffb22e747532cf469d/charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", size = 120451 }, - { url = "https://files.pythonhosted.org/packages/c2/72/12a7f0943dd71fb5b4e7b55c41327ac0a1663046a868ee4d0d8e9c369b85/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", size = 140041 }, - { url = "https://files.pythonhosted.org/packages/67/56/fa28c2c3e31217c4c52158537a2cf5d98a6c1e89d31faf476c89391cd16b/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", size = 150333 }, - { url = 
"https://files.pythonhosted.org/packages/f9/d2/466a9be1f32d89eb1554cf84073a5ed9262047acee1ab39cbaefc19635d2/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", size = 142921 }, - { url = "https://files.pythonhosted.org/packages/f8/01/344ec40cf5d85c1da3c1f57566c59e0c9b56bcc5566c08804a95a6cc8257/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", size = 144785 }, - { url = "https://files.pythonhosted.org/packages/73/8b/2102692cb6d7e9f03b9a33a710e0164cadfce312872e3efc7cfe22ed26b4/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", size = 146631 }, - { url = "https://files.pythonhosted.org/packages/d8/96/cc2c1b5d994119ce9f088a9a0c3ebd489d360a2eb058e2c8049f27092847/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", size = 140867 }, - { url = "https://files.pythonhosted.org/packages/c9/27/cde291783715b8ec30a61c810d0120411844bc4c23b50189b81188b273db/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", size = 149273 }, - { url = "https://files.pythonhosted.org/packages/3a/a4/8633b0fc1a2d1834d5393dafecce4a1cc56727bfd82b4dc18fc92f0d3cc3/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", size = 152437 }, - { url = "https://files.pythonhosted.org/packages/64/ea/69af161062166b5975ccbb0961fd2384853190c70786f288684490913bf5/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = 
"sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", size = 150087 }, - { url = "https://files.pythonhosted.org/packages/3b/fd/e60a9d9fd967f4ad5a92810138192f825d77b4fa2a557990fd575a47695b/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", size = 145142 }, - { url = "https://files.pythonhosted.org/packages/6d/02/8cb0988a1e49ac9ce2eed1e07b77ff118f2923e9ebd0ede41ba85f2dcb04/charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", size = 94701 }, - { url = "https://files.pythonhosted.org/packages/d6/20/f1d4670a8a723c46be695dff449d86d6092916f9e99c53051954ee33a1bc/charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", size = 102191 }, - { url = "https://files.pythonhosted.org/packages/9c/61/73589dcc7a719582bf56aae309b6103d2762b526bffe189d635a7fcfd998/charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", size = 193339 }, - { url = "https://files.pythonhosted.org/packages/77/d5/8c982d58144de49f59571f940e329ad6e8615e1e82ef84584c5eeb5e1d72/charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", size = 124366 }, - { url = "https://files.pythonhosted.org/packages/bf/19/411a64f01ee971bed3231111b69eb56f9331a769072de479eae7de52296d/charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", size = 118874 }, - { url = "https://files.pythonhosted.org/packages/4c/92/97509850f0d00e9f14a46bc751daabd0ad7765cff29cdfb66c68b6dad57f/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", size = 138243 }, - { url = "https://files.pythonhosted.org/packages/e2/29/d227805bff72ed6d6cb1ce08eec707f7cfbd9868044893617eb331f16295/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", size = 148676 }, - { url = "https://files.pythonhosted.org/packages/13/bc/87c2c9f2c144bedfa62f894c3007cd4530ba4b5351acb10dc786428a50f0/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", size = 141289 }, - { url = "https://files.pythonhosted.org/packages/eb/5b/6f10bad0f6461fa272bfbbdf5d0023b5fb9bc6217c92bf068fa5a99820f5/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", size = 142585 }, - { url = "https://files.pythonhosted.org/packages/3b/a0/a68980ab8a1f45a36d9745d35049c1af57d27255eff8c907e3add84cf68f/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", size = 144408 }, - { url = "https://files.pythonhosted.org/packages/d7/a1/493919799446464ed0299c8eef3c3fad0daf1c3cd48bff9263c731b0d9e2/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", size = 139076 }, - { url = "https://files.pythonhosted.org/packages/fb/9d/9c13753a5a6e0db4a0a6edb1cef7aee39859177b64e1a1e748a6e3ba62c2/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", size = 146874 }, - { url = 
"https://files.pythonhosted.org/packages/75/d2/0ab54463d3410709c09266dfb416d032a08f97fd7d60e94b8c6ef54ae14b/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", size = 150871 }, - { url = "https://files.pythonhosted.org/packages/8d/c9/27e41d481557be53d51e60750b85aa40eaf52b841946b3cdeff363105737/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", size = 148546 }, - { url = "https://files.pythonhosted.org/packages/ee/44/4f62042ca8cdc0cabf87c0fc00ae27cd8b53ab68be3605ba6d071f742ad3/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", size = 143048 }, - { url = "https://files.pythonhosted.org/packages/01/f8/38842422988b795220eb8038745d27a675ce066e2ada79516c118f291f07/charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", size = 94389 }, - { url = "https://files.pythonhosted.org/packages/0b/6e/b13bd47fa9023b3699e94abf565b5a2f0b0be6e9ddac9812182596ee62e4/charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", size = 101752 }, - { url = "https://files.pythonhosted.org/packages/d3/0b/4b7a70987abf9b8196845806198975b6aab4ce016632f817ad758a5aa056/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", size = 194445 }, - { url = "https://files.pythonhosted.org/packages/50/89/354cc56cf4dd2449715bc9a0f54f3aef3dc700d2d62d1fa5bbea53b13426/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", size = 125275 }, - { url = 
"https://files.pythonhosted.org/packages/fa/44/b730e2a2580110ced837ac083d8ad222343c96bb6b66e9e4e706e4d0b6df/charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", size = 119020 }, - { url = "https://files.pythonhosted.org/packages/9d/e4/9263b8240ed9472a2ae7ddc3e516e71ef46617fe40eaa51221ccd4ad9a27/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", size = 139128 }, - { url = "https://files.pythonhosted.org/packages/6b/e3/9f73e779315a54334240353eaea75854a9a690f3f580e4bd85d977cb2204/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", size = 149277 }, - { url = "https://files.pythonhosted.org/packages/1a/cf/f1f50c2f295312edb8a548d3fa56a5c923b146cd3f24114d5adb7e7be558/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", size = 142174 }, - { url = "https://files.pythonhosted.org/packages/16/92/92a76dc2ff3a12e69ba94e7e05168d37d0345fa08c87e1fe24d0c2a42223/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", size = 143838 }, - { url = "https://files.pythonhosted.org/packages/a4/01/2117ff2b1dfc61695daf2babe4a874bca328489afa85952440b59819e9d7/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", size = 146149 }, - { url = "https://files.pythonhosted.org/packages/f6/9b/93a332b8d25b347f6839ca0a61b7f0287b0930216994e8bf67a75d050255/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", 
hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", size = 140043 }, - { url = "https://files.pythonhosted.org/packages/ab/f6/7ac4a01adcdecbc7a7587767c776d53d369b8b971382b91211489535acf0/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", size = 148229 }, - { url = "https://files.pythonhosted.org/packages/9d/be/5708ad18161dee7dc6a0f7e6cf3a88ea6279c3e8484844c0590e50e803ef/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", size = 151556 }, - { url = "https://files.pythonhosted.org/packages/5a/bb/3d8bc22bacb9eb89785e83e6723f9888265f3a0de3b9ce724d66bd49884e/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", size = 149772 }, - { url = "https://files.pythonhosted.org/packages/f7/fa/d3fc622de05a86f30beea5fc4e9ac46aead4731e73fd9055496732bcc0a4/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", size = 144800 }, - { url = "https://files.pythonhosted.org/packages/9a/65/bdb9bc496d7d190d725e96816e20e2ae3a6fa42a5cac99c3c3d6ff884118/charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", size = 94836 }, - { url = "https://files.pythonhosted.org/packages/3e/67/7b72b69d25b89c0b3cea583ee372c43aa24df15f0e0f8d3982c57804984b/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", size = 102187 }, - { url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 }, - { url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 }, - { url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 }, - { url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 }, - { url = "https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 }, - { url = "https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 }, - { url = "https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 }, - { url = 
"https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 }, - { url = "https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 }, - { url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 }, - { url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 }, - { url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 }, - { url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 }, - { url = "https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 }, - { 
url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 }, - { url = "https://files.pythonhosted.org/packages/54/2f/28659eee7f5d003e0f5a3b572765bf76d6e0fe6601ab1f1b1dd4cba7e4f1/charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", size = 196326 }, - { url = "https://files.pythonhosted.org/packages/d1/18/92869d5c0057baa973a3ee2af71573be7b084b3c3d428fe6463ce71167f8/charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", size = 125614 }, - { url = "https://files.pythonhosted.org/packages/d6/27/327904c5a54a7796bb9f36810ec4173d2df5d88b401d2b95ef53111d214e/charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", size = 120450 }, - { url = "https://files.pythonhosted.org/packages/a4/23/65af317914a0308495133b2d654cf67b11bbd6ca16637c4e8a38f80a5a69/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", size = 140135 }, - { url = "https://files.pythonhosted.org/packages/f2/41/6190102ad521a8aa888519bb014a74251ac4586cde9b38e790901684f9ab/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", size = 150413 }, - { url = "https://files.pythonhosted.org/packages/7b/ab/f47b0159a69eab9bd915591106859f49670c75f9a19082505ff16f50efc0/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", size = 
142992 }, - { url = "https://files.pythonhosted.org/packages/28/89/60f51ad71f63aaaa7e51a2a2ad37919985a341a1d267070f212cdf6c2d22/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", size = 144871 }, - { url = "https://files.pythonhosted.org/packages/0c/48/0050550275fea585a6e24460b42465020b53375017d8596c96be57bfabca/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", size = 146756 }, - { url = "https://files.pythonhosted.org/packages/dc/b5/47f8ee91455946f745e6c9ddbb0f8f50314d2416dd922b213e7d5551ad09/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", size = 141034 }, - { url = "https://files.pythonhosted.org/packages/84/79/5c731059ebab43e80bf61fa51666b9b18167974b82004f18c76378ed31a3/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", size = 149434 }, - { url = "https://files.pythonhosted.org/packages/ca/f3/0719cd09fc4dc42066f239cb3c48ced17fc3316afca3e2a30a4756fe49ab/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", size = 152443 }, - { url = "https://files.pythonhosted.org/packages/f7/0e/c6357297f1157c8e8227ff337e93fd0a90e498e3d6ab96b2782204ecae48/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", size = 150294 }, - { url = "https://files.pythonhosted.org/packages/54/9a/acfa96dc4ea8c928040b15822b59d0863d6e1757fba8bd7de3dc4f761c13/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", size = 145314 }, - { url = "https://files.pythonhosted.org/packages/73/1c/b10a63032eaebb8d7bcb8544f12f063f41f5f463778ac61da15d9985e8b6/charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", size = 94724 }, - { url = "https://files.pythonhosted.org/packages/c5/77/3a78bf28bfaa0863f9cfef278dbeadf55efe064eafff8c7c424ae3c4c1bf/charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", size = 102159 }, - { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/58/5580c1716040bc89206c77d8f74418caf82ce519aae06450393ca73475d1/charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de", size = 198013 }, + { url = "https://files.pythonhosted.org/packages/d0/11/00341177ae71c6f5159a08168bcb98c6e6d196d372c94511f9f6c9afe0c6/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176", size = 141285 }, + { url = 
"https://files.pythonhosted.org/packages/01/09/11d684ea5819e5a8f5100fb0b38cf8d02b514746607934134d31233e02c8/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037", size = 151449 }, + { url = "https://files.pythonhosted.org/packages/08/06/9f5a12939db324d905dc1f70591ae7d7898d030d7662f0d426e2286f68c9/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f", size = 143892 }, + { url = "https://files.pythonhosted.org/packages/93/62/5e89cdfe04584cb7f4d36003ffa2936681b03ecc0754f8e969c2becb7e24/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a", size = 146123 }, + { url = "https://files.pythonhosted.org/packages/a9/ac/ab729a15c516da2ab70a05f8722ecfccc3f04ed7a18e45c75bbbaa347d61/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a", size = 147943 }, + { url = "https://files.pythonhosted.org/packages/03/d2/3f392f23f042615689456e9a274640c1d2e5dd1d52de36ab8f7955f8f050/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247", size = 142063 }, + { url = "https://files.pythonhosted.org/packages/f2/e3/e20aae5e1039a2cd9b08d9205f52142329f887f8cf70da3650326670bddf/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408", size = 150578 }, + { url = "https://files.pythonhosted.org/packages/8d/af/779ad72a4da0aed925e1139d458adc486e61076d7ecdcc09e610ea8678db/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb", size = 153629 }, + { url = "https://files.pythonhosted.org/packages/c2/b6/7aa450b278e7aa92cf7732140bfd8be21f5f29d5bf334ae987c945276639/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d", size = 150778 }, + { url = "https://files.pythonhosted.org/packages/39/f4/d9f4f712d0951dcbfd42920d3db81b00dd23b6ab520419626f4023334056/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807", size = 146453 }, + { url = "https://files.pythonhosted.org/packages/49/2b/999d0314e4ee0cff3cb83e6bc9aeddd397eeed693edb4facb901eb8fbb69/charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f", size = 95479 }, + { url = "https://files.pythonhosted.org/packages/2d/ce/3cbed41cff67e455a386fb5e5dd8906cdda2ed92fbc6297921f2e4419309/charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f", size = 102790 }, + { url = "https://files.pythonhosted.org/packages/72/80/41ef5d5a7935d2d3a773e3eaebf0a9350542f2cab4eac59a7a4741fbbbbe/charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125", size = 194995 }, + { url = "https://files.pythonhosted.org/packages/7a/28/0b9fefa7b8b080ec492110af6d88aa3dea91c464b17d53474b6e9ba5d2c5/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1", size = 139471 }, + { url = "https://files.pythonhosted.org/packages/71/64/d24ab1a997efb06402e3fc07317e94da358e2585165930d9d59ad45fcae2/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3", size = 149831 }, + { url = "https://files.pythonhosted.org/packages/37/ed/be39e5258e198655240db5e19e0b11379163ad7070962d6b0c87ed2c4d39/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd", size = 142335 }, + { url = "https://files.pythonhosted.org/packages/88/83/489e9504711fa05d8dde1574996408026bdbdbd938f23be67deebb5eca92/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00", size = 143862 }, + { url = "https://files.pythonhosted.org/packages/c6/c7/32da20821cf387b759ad24627a9aca289d2822de929b8a41b6241767b461/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12", size = 145673 }, + { url = "https://files.pythonhosted.org/packages/68/85/f4288e96039abdd5aeb5c546fa20a37b50da71b5cf01e75e87f16cd43304/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77", size = 140211 }, + { url = "https://files.pythonhosted.org/packages/28/a3/a42e70d03cbdabc18997baf4f0227c73591a08041c149e710045c281f97b/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146", size = 148039 }, + { url = "https://files.pythonhosted.org/packages/85/e4/65699e8ab3014ecbe6f5c71d1a55d810fb716bbfd74f6283d5c2aa87febf/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd", size = 151939 }, + { url = 
"https://files.pythonhosted.org/packages/b1/82/8e9fe624cc5374193de6860aba3ea8070f584c8565ee77c168ec13274bd2/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6", size = 149075 }, + { url = "https://files.pythonhosted.org/packages/3d/7b/82865ba54c765560c8433f65e8acb9217cb839a9e32b42af4aa8e945870f/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8", size = 144340 }, + { url = "https://files.pythonhosted.org/packages/b5/b6/9674a4b7d4d99a0d2df9b215da766ee682718f88055751e1e5e753c82db0/charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b", size = 95205 }, + { url = "https://files.pythonhosted.org/packages/1e/ab/45b180e175de4402dcf7547e4fb617283bae54ce35c27930a6f35b6bef15/charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76", size = 102441 }, + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, + { url = 
"https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, + { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, + { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, + { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = 
"https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, + { url = "https://files.pythonhosted.org/packages/7f/c0/b913f8f02836ed9ab32ea643c6fe4d3325c3d8627cf6e78098671cafff86/charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41", size = 197867 }, + { url = "https://files.pythonhosted.org/packages/0f/6c/2bee440303d705b6fb1e2ec789543edec83d32d258299b16eed28aad48e0/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f", size = 141385 }, + { url = "https://files.pythonhosted.org/packages/3d/04/cb42585f07f6f9fd3219ffb6f37d5a39b4fd2db2355b23683060029c35f7/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2", size = 151367 }, + { url = "https://files.pythonhosted.org/packages/54/54/2412a5b093acb17f0222de007cc129ec0e0df198b5ad2ce5699355269dfe/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash 
= "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770", size = 143928 }, + { url = "https://files.pythonhosted.org/packages/5a/6d/e2773862b043dcf8a221342954f375392bb2ce6487bcd9f2c1b34e1d6781/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4", size = 146203 }, + { url = "https://files.pythonhosted.org/packages/b9/f8/ca440ef60d8f8916022859885f231abb07ada3c347c03d63f283bec32ef5/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537", size = 148082 }, + { url = "https://files.pythonhosted.org/packages/04/d2/42fd330901aaa4b805a1097856c2edf5095e260a597f65def493f4b8c833/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496", size = 142053 }, + { url = "https://files.pythonhosted.org/packages/9e/af/3a97a4fa3c53586f1910dadfc916e9c4f35eeada36de4108f5096cb7215f/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78", size = 150625 }, + { url = "https://files.pythonhosted.org/packages/26/ae/23d6041322a3556e4da139663d02fb1b3c59a23ab2e2b56432bd2ad63ded/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7", size = 153549 }, + { url = "https://files.pythonhosted.org/packages/94/22/b8f2081c6a77cb20d97e57e0b385b481887aa08019d2459dc2858ed64871/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6", size = 150945 }, + { url = 
"https://files.pythonhosted.org/packages/c7/0b/c5ec5092747f801b8b093cdf5610e732b809d6cb11f4c51e35fc28d1d389/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294", size = 146595 }, + { url = "https://files.pythonhosted.org/packages/0c/5a/0b59704c38470df6768aa154cc87b1ac7c9bb687990a1559dc8765e8627e/charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5", size = 95453 }, + { url = "https://files.pythonhosted.org/packages/85/2d/a9790237cb4d01a6d57afadc8573c8b73c609ade20b80f4cda30802009ee/charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765", size = 102811 }, + { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, ] [[package]] name = "cloudpickle" -version = "3.1.0" +version = "3.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/97/c7/f746cadd08c4c08129215cf1b984b632f9e579fc781301e63da9e85c76c1/cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b", size = 66155 } +sdist = { url = "https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113 } wheels = [ - { url = "https://files.pythonhosted.org/packages/48/41/e1d85ca3cab0b674e277c8c4f678cf66a91cd2cecf93df94353a606fe0db/cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e", size = 22021 }, + { url = 
"https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 20992 }, ] [[package]] @@ -347,39 +337,39 @@ wheels = [ [[package]] name = "cryptography" -version = "43.0.3" +version = "44.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0d/05/07b55d1fa21ac18c3a8c79f764e2514e6f6a9698f1be44994f5adf0d29db/cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805", size = 686989 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1f/f3/01fdf26701a26f4b4dbc337a26883ad5bccaa6f1bbbdd29cd89e22f18a1c/cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e", size = 6225303 }, - { url = "https://files.pythonhosted.org/packages/a3/01/4896f3d1b392025d4fcbecf40fdea92d3df8662123f6835d0af828d148fd/cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e", size = 3760905 }, - { url = "https://files.pythonhosted.org/packages/0a/be/f9a1f673f0ed4b7f6c643164e513dbad28dd4f2dcdf5715004f172ef24b6/cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f", size = 3977271 }, - { url = "https://files.pythonhosted.org/packages/4e/49/80c3a7b5514d1b416d7350830e8c422a4d667b6d9b16a9392ebfd4a5388a/cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6", size = 3746606 }, - { url = 
"https://files.pythonhosted.org/packages/0e/16/a28ddf78ac6e7e3f25ebcef69ab15c2c6be5ff9743dd0709a69a4f968472/cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18", size = 3986484 }, - { url = "https://files.pythonhosted.org/packages/01/f5/69ae8da70c19864a32b0315049866c4d411cce423ec169993d0434218762/cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd", size = 3852131 }, - { url = "https://files.pythonhosted.org/packages/fd/db/e74911d95c040f9afd3612b1f732e52b3e517cb80de8bf183be0b7d413c6/cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73", size = 4075647 }, - { url = "https://files.pythonhosted.org/packages/56/48/7b6b190f1462818b324e674fa20d1d5ef3e24f2328675b9b16189cbf0b3c/cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2", size = 2623873 }, - { url = "https://files.pythonhosted.org/packages/eb/b1/0ebff61a004f7f89e7b65ca95f2f2375679d43d0290672f7713ee3162aff/cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd", size = 3068039 }, - { url = "https://files.pythonhosted.org/packages/30/d5/c8b32c047e2e81dd172138f772e81d852c51f0f2ad2ae8a24f1122e9e9a7/cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984", size = 6222984 }, - { url = "https://files.pythonhosted.org/packages/2f/78/55356eb9075d0be6e81b59f45c7b48df87f76a20e73893872170471f3ee8/cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5", size = 3762968 }, - { url = 
"https://files.pythonhosted.org/packages/2a/2c/488776a3dc843f95f86d2f957ca0fc3407d0242b50bede7fad1e339be03f/cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4", size = 3977754 }, - { url = "https://files.pythonhosted.org/packages/7c/04/2345ca92f7a22f601a9c62961741ef7dd0127c39f7310dffa0041c80f16f/cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7", size = 3749458 }, - { url = "https://files.pythonhosted.org/packages/ac/25/e715fa0bc24ac2114ed69da33adf451a38abb6f3f24ec207908112e9ba53/cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405", size = 3988220 }, - { url = "https://files.pythonhosted.org/packages/21/ce/b9c9ff56c7164d8e2edfb6c9305045fbc0df4508ccfdb13ee66eb8c95b0e/cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16", size = 3853898 }, - { url = "https://files.pythonhosted.org/packages/2a/33/b3682992ab2e9476b9c81fff22f02c8b0a1e6e1d49ee1750a67d85fd7ed2/cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73", size = 4076592 }, - { url = "https://files.pythonhosted.org/packages/81/1e/ffcc41b3cebd64ca90b28fd58141c5f68c83d48563c88333ab660e002cd3/cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995", size = 2623145 }, - { url = "https://files.pythonhosted.org/packages/87/5c/3dab83cc4aba1f4b0e733e3f0c3e7d4386440d660ba5b1e3ff995feb734d/cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362", size = 3068026 }, - { url = 
"https://files.pythonhosted.org/packages/6f/db/d8b8a039483f25fc3b70c90bc8f3e1d4497a99358d610c5067bf3bd4f0af/cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c", size = 3144545 }, - { url = "https://files.pythonhosted.org/packages/93/90/116edd5f8ec23b2dc879f7a42443e073cdad22950d3c8ee834e3b8124543/cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3", size = 3679828 }, - { url = "https://files.pythonhosted.org/packages/d8/32/1e1d78b316aa22c0ba6493cc271c1c309969e5aa5c22c830a1d7ce3471e6/cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83", size = 3908132 }, - { url = "https://files.pythonhosted.org/packages/91/bb/cd2c13be3332e7af3cdf16154147952d39075b9f61ea5e6b5241bf4bf436/cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7", size = 2988811 }, - { url = "https://files.pythonhosted.org/packages/cc/fc/ff7c76afdc4f5933b5e99092528d4783d3d1b131960fc8b31eb38e076ca8/cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664", size = 3146844 }, - { url = "https://files.pythonhosted.org/packages/d7/29/a233efb3e98b13d9175dcb3c3146988ec990896c8fa07e8467cce27d5a80/cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08", size = 3681997 }, - { url = "https://files.pythonhosted.org/packages/c0/cf/c9eea7791b961f279fb6db86c3355cfad29a73141f46427af71852b23b95/cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa", size = 3905208 }, - { url = 
"https://files.pythonhosted.org/packages/21/ea/6c38ca546d5b6dab3874c2b8fc6b1739baac29bacdea31a8c6c0513b3cfa/cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff", size = 2989787 }, +sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = "sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, + { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, + { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, + { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, + { url = "https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, + { url = 
"https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, + { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, + { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, + { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, + { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, + { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, + { url = "https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, + { url = 
"https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, + { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, + { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, + { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, + { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, + { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, + { url = "https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, + { url = 
"https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, + { url = "https://files.pythonhosted.org/packages/77/d4/fea74422326388bbac0c37b7489a0fcb1681a698c3b875959430ba550daa/cryptography-44.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37d76e6863da3774cd9db5b409a9ecfd2c71c981c38788d3fcfaf177f447b731", size = 3338857 }, + { url = "https://files.pythonhosted.org/packages/1a/aa/ba8a7467c206cb7b62f09b4168da541b5109838627f582843bbbe0235e8e/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f677e1268c4e23420c3acade68fac427fffcb8d19d7df95ed7ad17cdef8404f4", size = 3850615 }, + { url = "https://files.pythonhosted.org/packages/89/fa/b160e10a64cc395d090105be14f399b94e617c879efd401188ce0fea39ee/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f5e7cb1e5e56ca0933b4873c0220a78b773b24d40d186b6738080b73d3d0a756", size = 4081622 }, + { url = "https://files.pythonhosted.org/packages/47/8f/20ff0656bb0cf7af26ec1d01f780c5cfbaa7666736063378c5f48558b515/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:8b3e6eae66cf54701ee7d9c83c30ac0a1e3fa17be486033000f2a73a12ab507c", size = 3867546 }, + { url = "https://files.pythonhosted.org/packages/38/d9/28edf32ee2fcdca587146bcde90102a7319b2f2c690edfa627e46d586050/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:be4ce505894d15d5c5037167ffb7f0ae90b7be6f2a98f9a5c3442395501c32fa", size = 4090937 }, + { url = "https://files.pythonhosted.org/packages/cc/9d/37e5da7519de7b0b070a3fedd4230fe76d50d2a21403e0f2153d70ac4163/cryptography-44.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:62901fb618f74d7d81bf408c8719e9ec14d863086efe4185afd07c352aee1d2c", size = 3128774 }, ] [[package]] @@ -405,14 +395,14 @@ wheels = [ 
[[package]] name = "deprecated" -version = "1.2.14" +version = "1.2.18" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/92/14/1e41f504a246fc224d2ac264c227975427a85caf37c3979979edb9b1b232/Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3", size = 2974416 } +sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744 } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/8d/778b7d51b981a96554f29136cd59ca7880bf58094338085bcf2a979a0e6a/Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c", size = 9561 }, + { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998 }, ] [[package]] @@ -476,20 +466,20 @@ wheels = [ [[package]] name = "filelock" -version = "3.16.1" +version = "3.17.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037 } +sdist = { url = "https://files.pythonhosted.org/packages/dc/9c/0b15fb47b464e1b663b1acd1253a062aa5feecb07d4e597daea542ebd2b5/filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e", size = 18027 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163 }, + { url = "https://files.pythonhosted.org/packages/89/ec/00d68c4ddfedfe64159999e5f8a98fb8442729a63e2077eb9dcd89623d27/filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338", size = 16164 }, ] [[package]] name = "fsspec" -version = "2024.10.0" +version = "2025.2.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a0/52/f16a068ebadae42526484c31f4398e62962504e5724a8ba5dc3409483df2/fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493", size = 286853 } +sdist = { url = "https://files.pythonhosted.org/packages/b5/79/68612ed99700e6413de42895aa725463e821a6b3be75c87fcce1b4af4c70/fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd", size = 292283 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/b2/454d6e7f0158951d8a78c2e1eb4f69ae81beb8dca5fee9809c6c99e9d0d0/fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871", size = 179641 }, + { url = "https://files.pythonhosted.org/packages/e2/94/758680531a00d06e471ef649e4ec2ed6bf185356a7f9fbfbb7368a40bd49/fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b", size = 184484 }, ] [[package]] @@ -522,7 +512,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "0.26.2" +version = "0.28.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -533,9 +523,21 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/d5/a8/882ae5d1cfa7c9c5be32feee4cee56d9873078913953423e47a756da110d/huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b", size = 375621 } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ce/a734204aaae6c35a22f9956ebcd8d8708ae5b842e15d6f42bd6f49e634a4/huggingface_hub-0.28.1.tar.gz", hash = "sha256:893471090c98e3b6efbdfdacafe4052b20b84d59866fb6f54c33d9af18c303ae", size = 387074 } wheels = [ - { url = "https://files.pythonhosted.org/packages/60/bf/cea0b9720c32fa01b0c4ec4b16b9f4ae34ca106b202ebbae9f03ab98cd8f/huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46", size = 447536 }, + { url = "https://files.pythonhosted.org/packages/ea/da/6c2bea5327b640920267d3bf2c9fc114cfbd0a5de234d81cda80cc9e33c8/huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7", size = 464068 }, +] + +[[package]] +name = "id" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/11/102da08f88412d875fa2f1a9a469ff7ad4c874b0ca6fed0048fe385bdb3d/id-1.5.0.tar.gz", hash = "sha256:292cb8a49eacbbdbce97244f47a97b4c62540169c976552e497fd57df0734c1d", size = 15237 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/cb/18326d2d89ad3b0dd143da971e77afd1e6ca6674f1b1c3df4b6bec6279fc/id-1.5.0-py3-none-any.whl", hash = "sha256:f1434e1cef91f2cbb8a4ec64663d5a23b9ed43ef44c4c957d02583d61714c658", size = 13611 }, ] [[package]] @@ -558,14 +560,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.5.0" +version = "8.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304 } +sdist = { url = "https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514 }, + { url = "https://files.pythonhosted.org/packages/79/9d/0fb148dc4d6fa4a7dd1d8378168d9b4cd8d4560a6fbf6f0121c5fc34eb68/importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e", size = 26971 }, ] [[package]] @@ -633,19 +635,19 @@ wheels = [ [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markupsafe" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", size = 240245 } +sdist = { url = "https://files.pythonhosted.org/packages/af/92/b3130cbbf5591acf9ade8708c365f3238046ac7cb8ccba6e81abccb0ccff/jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", size = 244674 } wheels = [ - { url = "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", size = 133271 }, + { url = 
"https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, ] [[package]] name = "keyring" -version = "25.5.0" +version = "25.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "importlib-metadata", marker = "python_full_version < '3.12'" }, @@ -656,9 +658,9 @@ dependencies = [ { name = "pywin32-ctypes", marker = "sys_platform == 'win32'" }, { name = "secretstorage", marker = "sys_platform == 'linux'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f6/24/64447b13df6a0e2797b586dad715766d756c932ce8ace7f67bd384d76ae0/keyring-25.5.0.tar.gz", hash = "sha256:4c753b3ec91717fe713c4edd522d625889d8973a349b0e582622f49766de58e6", size = 62675 } +sdist = { url = "https://files.pythonhosted.org/packages/70/09/d904a6e96f76ff214be59e7aa6ef7190008f52a0ab6689760a98de0bf37d/keyring-25.6.0.tar.gz", hash = "sha256:0b39998aa941431eb3d9b0d4b2460bc773b9df6fed7621c2dfb291a7e0187a66", size = 62750 } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/c9/353c156fa2f057e669106e5d6bcdecf85ef8d3536ce68ca96f18dc7b6d6f/keyring-25.5.0-py3-none-any.whl", hash = "sha256:e67f8ac32b04be4714b42fe84ce7dad9c40985b9ca827c592cc303e7c26d9741", size = 39096 }, + { url = "https://files.pythonhosted.org/packages/d3/32/da7f44bcb1105d3e88a0b74ebdca50c59121d2ddf71c9e34ba47df7f3a56/keyring-25.6.0-py3-none-any.whl", hash = "sha256:552a3f7af126ece7ed5c89753650eec89c7eaae8617d0aa4d9ad2b75111266bd", size = 39085 }, ] [[package]] @@ -764,11 +766,11 @@ wheels = [ [[package]] name = "more-itertools" -version = "10.5.0" +version = "10.6.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/51/78/65922308c4248e0eb08ebcbe67c95d48615cc6f27854b6f2e57143e9178f/more-itertools-10.5.0.tar.gz", hash = 
"sha256:5482bfef7849c25dc3c6dd53a6173ae4795da2a41a80faea6700d9f5846c5da6", size = 121020 } +sdist = { url = "https://files.pythonhosted.org/packages/88/3b/7fa1fe835e2e93fd6d7b52b2f95ae810cf5ba133e1845f726f5a992d62c2/more-itertools-10.6.0.tar.gz", hash = "sha256:2cd7fad1009c31cc9fb6a035108509e6547547a7a738374f10bd49a09eb3ee3b", size = 125009 } wheels = [ - { url = "https://files.pythonhosted.org/packages/48/7e/3a64597054a70f7c86eb0a7d4fc315b8c1ab932f64883a297bdffeb5f967/more_itertools-10.5.0-py3-none-any.whl", hash = "sha256:037b0d3203ce90cca8ab1defbbdac29d5f993fc20131f3664dc8d6acfa872aef", size = 60952 }, + { url = "https://files.pythonhosted.org/packages/23/62/0fe302c6d1be1c777cab0616e6302478251dfbf9055ad426f5d0def75c89/more_itertools-10.6.0-py3-none-any.whl", hash = "sha256:6eb054cb4b6db1473f6e15fcc676a08e4732548acd47c708f0e179c2c7c01e89", size = 63038 }, ] [[package]] @@ -873,32 +875,57 @@ wheels = [ name = "networkx" version = "3.2.1" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] sdist = { url = "https://files.pythonhosted.org/packages/c4/80/a84676339aaae2f1cfdf9f418701dd634aef9cc76f708ef55c36ff39c3ca/networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6", size = 2073928 } wheels = [ { url = "https://files.pythonhosted.org/packages/d5/f0/8fbc882ca80cf077f1b246c0e3c3465f7f415439bdea6b899f6b19f61f70/networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2", size = 1647772 }, ] [[package]] -name = "nh3" -version = "0.2.18" +name = "networkx" +version = "3.4.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/62/73/10df50b42ddb547a907deeb2f3c9823022580a7a47281e8eae8e003a9639/nh3-0.2.18.tar.gz", hash = "sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4", size = 15028 } +resolution-markers = [ + "python_full_version >= 
'3.13'", + "python_full_version == '3.12.*'", + "python_full_version >= '3.10' and python_full_version < '3.12'", +] +sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368 } wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/89/1daff5d9ba5a95a157c092c7c5f39b8dd2b1ddb4559966f808d31cfb67e0/nh3-0.2.18-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86", size = 1374474 }, - { url = "https://files.pythonhosted.org/packages/2c/b6/42fc3c69cabf86b6b81e4c051a9b6e249c5ba9f8155590222c2622961f58/nh3-0.2.18-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811", size = 694573 }, - { url = "https://files.pythonhosted.org/packages/45/b9/833f385403abaf0023c6547389ec7a7acf141ddd9d1f21573723a6eab39a/nh3-0.2.18-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200", size = 844082 }, - { url = "https://files.pythonhosted.org/packages/05/2b/85977d9e11713b5747595ee61f381bc820749daf83f07b90b6c9964cf932/nh3-0.2.18-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164", size = 782460 }, - { url = "https://files.pythonhosted.org/packages/72/f2/5c894d5265ab80a97c68ca36f25c8f6f0308abac649aaf152b74e7e854a8/nh3-0.2.18-cp37-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189", size = 879827 }, - { url = "https://files.pythonhosted.org/packages/ab/a7/375afcc710dbe2d64cfbd69e31f82f3e423d43737258af01f6a56d844085/nh3-0.2.18-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad", size = 841080 }, - { url = "https://files.pythonhosted.org/packages/c2/a8/3bb02d0c60a03ad3a112b76c46971e9480efa98a8946677b5a59f60130ca/nh3-0.2.18-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b", size = 924144 }, - { url = "https://files.pythonhosted.org/packages/1b/63/6ab90d0e5225ab9780f6c9fb52254fa36b52bb7c188df9201d05b647e5e1/nh3-0.2.18-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307", size = 769192 }, - { url = "https://files.pythonhosted.org/packages/a4/17/59391c28580e2c32272761629893e761442fc7666da0b1cdb479f3b67b88/nh3-0.2.18-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f", size = 791042 }, - { url = "https://files.pythonhosted.org/packages/a3/da/0c4e282bc3cff4a0adf37005fa1fb42257673fbc1bbf7d1ff639ec3d255a/nh3-0.2.18-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe", size = 1010073 }, - { url = "https://files.pythonhosted.org/packages/de/81/c291231463d21da5f8bba82c8167a6d6893cc5419b0639801ee5d3aeb8a9/nh3-0.2.18-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a", size = 1029782 }, - { url = "https://files.pythonhosted.org/packages/63/1d/842fed85cf66c973be0aed8770093d6a04741f65e2c388ddd4c07fd3296e/nh3-0.2.18-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50", size = 942504 }, - { url = "https://files.pythonhosted.org/packages/eb/61/73a007c74c37895fdf66e0edcd881f5eaa17a348ff02f4bb4bc906d61085/nh3-0.2.18-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204", size 
= 941541 }, - { url = "https://files.pythonhosted.org/packages/78/48/54a788fc9428e481b2f58e0cd8564f6c74ffb6e9ef73d39e8acbeae8c629/nh3-0.2.18-cp37-abi3-win32.whl", hash = "sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be", size = 573750 }, - { url = "https://files.pythonhosted.org/packages/26/8d/53c5b19c4999bdc6ba95f246f4ef35ca83d7d7423e5e38be43ad66544e5d/nh3-0.2.18-cp37-abi3-win_amd64.whl", hash = "sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844", size = 579012 }, + { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263 }, +] + +[[package]] +name = "nh3" +version = "0.2.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/f2/eb781d94c7855e9129cbbdd3ab09a470441e4176a82a396ae1df270a7333/nh3-0.2.20.tar.gz", hash = "sha256:9705c42d7ff88a0bea546c82d7fe5e59135e3d3f057e485394f491248a1f8ed5", size = 17489 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/65/d31d93b6d1e5fe80d0cc18f0b96eaa561edfa0a15a6ef6b0fce50202a931/nh3-0.2.20-cp313-cp313t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e1061a4ab6681f6bdf72b110eea0c4e1379d57c9de937db3be4202f7ad6043db", size = 1202187 }, + { url = "https://files.pythonhosted.org/packages/b4/ae/5b03bf198e06921454012e4b9a51e676d26fd37d9fdc1f29371a0b380487/nh3-0.2.20-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb4254b1dac4a1ee49919a5b3f1caf9803ea8dada1816d9e8289e63d3cd0dd9a", size = 737822 }, + { url = "https://files.pythonhosted.org/packages/0a/53/a12dffb6ee3772deba82eb5997667fc835afd2e813d1f4080d8738f29eec/nh3-0.2.20-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ae9cbd713524cdb81e64663d0d6aae26f678db9f2cd9db0bf162606f1f9f20c", size = 
756643 }, + { url = "https://files.pythonhosted.org/packages/d0/0c/6cd2c5ac3e6e31f2a28721e8e2a924cb6b05ad054bf787bd1816ffd40b96/nh3-0.2.20-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e1f7370b4e14cc03f5ae141ef30a1caf81fa5787711f80be9081418dd9eb79d2", size = 923415 }, + { url = "https://files.pythonhosted.org/packages/64/f0/229a6c8b81b86ba22d8e7f27ade62cb2fcfb987e570f49944fdd8490a76a/nh3-0.2.20-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:ac4d27dc836a476efffc6eb661994426b8b805c951b29c9cf2ff36bc9ad58bc5", size = 994959 }, + { url = "https://files.pythonhosted.org/packages/75/e3/62ae3d3b658739ee15b129356fe6d4c4bc8ab235d7bf2e0d2794d64f7bc6/nh3-0.2.20-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4fd2e9248725ebcedac3997a8d3da0d90a12a28c9179c6ba51f1658938ac30d0", size = 915777 }, + { url = "https://files.pythonhosted.org/packages/45/bd/8405d03371e335f02eb72e09dcf73307f8fd3095e4165cec6836346fe3db/nh3-0.2.20-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f7d564871833ddbe54df3aa59053b1110729d3a800cb7628ae8f42adb3d75208", size = 908614 }, + { url = "https://files.pythonhosted.org/packages/ee/f8/5d977f09cf82c1f22a864375f471db111530fc79c88efdf0659fe6d3d6bc/nh3-0.2.20-cp313-cp313t-win32.whl", hash = "sha256:d2a176fd4306b6f0f178a3f67fac91bd97a3a8d8fafb771c9b9ef675ba5c8886", size = 540482 }, + { url = "https://files.pythonhosted.org/packages/c5/f4/e34afe5fd8bed1920eac2974c9c853f548b4b65c139444285ffd2a68495d/nh3-0.2.20-cp313-cp313t-win_amd64.whl", hash = "sha256:6ed834c68452a600f517dd3e1534dbfaff1f67f98899fecf139a055a25d99150", size = 541302 }, + { url = "https://files.pythonhosted.org/packages/92/08/5e3b61eed1bc0efeb330ddc5cf5194f28a0b7be7943aa20bd44cfe14650b/nh3-0.2.20-cp38-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:76e2f603b30c02ff6456b233a83fc377dedab6a50947b04e960a6b905637b776", size = 1202141 }, + { url = 
"https://files.pythonhosted.org/packages/29/d2/3377f8006c71e95e007b07b5bfcac22c9de4744ca3efb23b396d3deb9581/nh3-0.2.20-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:181063c581defe683bd4bb78188ac9936d208aebbc74c7f7c16b6a32ae2ebb38", size = 760699 }, + { url = "https://files.pythonhosted.org/packages/37/d7/7077f925d7d680d53dcb6e18a4af13d1a7da59761c06c193bfa249a7470a/nh3-0.2.20-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:231addb7643c952cd6d71f1c8702d703f8fe34afcb20becb3efb319a501a12d7", size = 747353 }, + { url = "https://files.pythonhosted.org/packages/cb/59/6b2f32af477aae81f1454a7f6ef490ebc3c22dd9e1370e73fcfe243dc07a/nh3-0.2.20-cp38-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:1b9a8340a0aab991c68a5ca938d35ef4a8a3f4bf1b455da8855a40bee1fa0ace", size = 854125 }, + { url = "https://files.pythonhosted.org/packages/5b/f2/c3d2f7b801477b8b387b51fbefd16dc7ade888aeac547f18ba0558fd6f48/nh3-0.2.20-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10317cd96fe4bbd4eb6b95f3920b71c902157ad44fed103fdcde43e3b8ee8be6", size = 817453 }, + { url = "https://files.pythonhosted.org/packages/42/4d/f7e3a35506a0eba6eedafc21ad52773985511eb838812e9f96354831ad3c/nh3-0.2.20-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8698db4c04b140800d1a1cd3067fda399e36e1e2b8fc1fe04292a907350a3e9b", size = 891694 }, + { url = "https://files.pythonhosted.org/packages/e6/0e/c499453c296fb40366e3069cd68fde77a10f0a30a17b9d3b491eb3ebc5bf/nh3-0.2.20-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eb04b9c3deb13c3a375ea39fd4a3c00d1f92e8fb2349f25f1e3e4506751774b", size = 744388 }, + { url = "https://files.pythonhosted.org/packages/18/67/c3de8022ba2719bdbbdd3704d1e32dbc7d3f8ac8646247711645fc90d051/nh3-0.2.20-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92f3f1c4f47a2c6f3ca7317b1d5ced05bd29556a75d3a4e2715652ae9d15c05d", size = 764831 }, + { url 
= "https://files.pythonhosted.org/packages/f0/14/a4ea40e2439717d11c3104fc2dc0ac412301b7aeb81d6a3d0e6505c77e7d/nh3-0.2.20-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ddefa9fd6794a87e37d05827d299d4b53a3ec6f23258101907b96029bfef138a", size = 923334 }, + { url = "https://files.pythonhosted.org/packages/ed/ae/e8ee8afaf67903dd304f390056d1ea620327524e2ad66127a331b14d5d98/nh3-0.2.20-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ce3731c8f217685d33d9268362e5b4f770914e922bba94d368ab244a59a6c397", size = 994873 }, + { url = "https://files.pythonhosted.org/packages/20/b5/02122cfe3b36cf0ba0fcd73a04fd462e1f7a9d91b456f6e0b70e46df21c7/nh3-0.2.20-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:09f037c02fc2c43b211ff1523de32801dcfb0918648d8e651c36ef890f1731ec", size = 915707 }, + { url = "https://files.pythonhosted.org/packages/47/d3/5df43cc3570cdc9eb1dc79a39191f89fedf8bcefd8d30a161ff1dffb146c/nh3-0.2.20-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:813f1c8012dd64c990514b795508abb90789334f76a561fa0fd4ca32d2275330", size = 908539 }, + { url = "https://files.pythonhosted.org/packages/4f/fd/aa000f6c76a832c488eac26f20d2e8a221ba2b965efce692f14ebc4290bf/nh3-0.2.20-cp38-abi3-win32.whl", hash = "sha256:47b2946c0e13057855209daeffb45dc910bd0c55daf10190bb0b4b60e2999784", size = 540439 }, + { url = "https://files.pythonhosted.org/packages/19/31/d65594efd3b42b1de2335d576eb77525691fc320dbf8617948ee05c008e5/nh3-0.2.20-cp38-abi3-win_amd64.whl", hash = "sha256:da87573f03084edae8eb87cfe811ec338606288f81d333c07d2a9a0b9b976c0b", size = 541249 }, ] [[package]] @@ -910,10 +937,29 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, ] +[[package]] +name = "nodejs-wheel-binaries" +version = "22.13.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/5d/c5/1af2fc54fcc18f4a99426b46f18832a04f755ee340019e1be536187c1e1c/nodejs_wheel_binaries-22.13.1.tar.gz", hash = "sha256:a0c15213c9c3383541be4400a30959883868ce5da9cebb3d63ddc7fe61459308", size = 8053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e9/b0dd118e0fd4eabe1ec9c3d9a68df4d811282e8837b811d804f23742e117/nodejs_wheel_binaries-22.13.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e4f64d0e26600d51cbdd98a6718a19c2d1b8c7538e9e353e95a634a06a8e1a58", size = 51015650 }, + { url = "https://files.pythonhosted.org/packages/cc/a6/9ba835f5d4f3f6b1f01191e7ac0874871f9743de5c42a5a9a54e67c2e2a6/nodejs_wheel_binaries-22.13.1-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:afcb40484bb02f23137f838014724604ae183fd767b30da95b0be1510a40c06d", size = 51814957 }, + { url = "https://files.pythonhosted.org/packages/0d/2e/a430207e5f22bd3dcffb81acbddf57ee4108b9e2b0f99a5578dc2c1ff7fc/nodejs_wheel_binaries-22.13.1-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fc88c98eebabfc36b5270a4ab974a2682746931567ca76a5ca49c54482bbb51", size = 57148437 }, + { url = "https://files.pythonhosted.org/packages/97/f4/5731b6f0c8af434619b4f1b8fd895bc33fca60168cd68133e52841872114/nodejs_wheel_binaries-22.13.1-py2.py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9f75ea8f5e3e5416256fcb00a98cbe14c8d3b6dcaf17da29c4ade5723026d8", size = 57634451 }, + { url = "https://files.pythonhosted.org/packages/49/28/83166f7e39812e9ef99cfa3e722c54e32dd9de6a1290f3216c2e5d1f4957/nodejs_wheel_binaries-22.13.1-py2.py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:94608702ef6c389d32e89ff3b7a925cb5dedaf55b5d98bd0c4fb3450a8b6d1c1", size = 58794510 }, + { url = "https://files.pythonhosted.org/packages/f7/64/4832ec26d0a7ca7a5574df265d85c6832f9a624024511fc34958227ad740/nodejs_wheel_binaries-22.13.1-py2.py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:53a40d0269689aa2eaf2e261cbe5ec256644bc56aae0201ef344b7d8f40ccc79", size = 59738596 }, + { url = "https://files.pythonhosted.org/packages/18/cd/def29615dac250cda3d141e1c03b7153b9a027360bde0272a6768c5fae33/nodejs_wheel_binaries-22.13.1-py2.py3-none-win_amd64.whl", hash = "sha256:549371a929a29fbce8d0ab8f1b5410549946d4f1b0376a5ce635b45f6d05298f", size = 40455444 }, + { url = "https://files.pythonhosted.org/packages/15/d7/6de2bc615203bf590ca437a5cac145b2f86d994ce329489125a0a90ba715/nodejs_wheel_binaries-22.13.1-py2.py3-none-win_arm64.whl", hash = "sha256:cf72d50d755f4e5c0709b0449de01768d96b3b1ec7aa531561415b88f179ad8b", size = 36200929 }, +] + [[package]] name = "numpy" version = "2.0.2" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] sdist = { url = "https://files.pythonhosted.org/packages/a9/75/10dd1f8116a8b796cb2c737b674e02d02e80454bda953fa7e65d8c12b016/numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78", size = 18902015 } wheels = [ { url = "https://files.pythonhosted.org/packages/21/91/3495b3237510f79f5d81f2508f9f13fea78ebfdf07538fc7444badda173d/numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece", size = 21165245 }, @@ -962,12 +1008,78 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/dc/d330a6faefd92b446ec0f0dfea4c3207bb1fef3c4771d19cf4543efd2c78/numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385", size = 15828784 }, ] +[[package]] +name = "numpy" +version = "2.2.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version >= '3.10' and python_full_version < '3.12'", +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ec/d0/c12ddfd3a02274be06ffc71f3efc6d0e457b0409c4481596881e748cb264/numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f", size = 20233295 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/2a/69033dc22d981ad21325314f8357438078f5c28310a6d89fb3833030ec8a/numpy-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7079129b64cb78bdc8d611d1fd7e8002c0a2565da6a47c4df8062349fee90e3e", size = 21215825 }, + { url = "https://files.pythonhosted.org/packages/31/2c/39f91e00bbd3d5639b027ac48c55dc5f2992bd2b305412d26be4c830862a/numpy-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ec6c689c61df613b783aeb21f945c4cbe6c51c28cb70aae8430577ab39f163e", size = 14354996 }, + { url = "https://files.pythonhosted.org/packages/0a/2c/d468ebd253851af10de5b3e8f3418ebabfaab5f0337a75299fbeb8b8c17a/numpy-2.2.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:40c7ff5da22cd391944a28c6a9c638a5eef77fcf71d6e3a79e1d9d9e82752715", size = 5393621 }, + { url = "https://files.pythonhosted.org/packages/7f/f4/3d8a5a0da297034106c5de92be881aca7079cde6058934215a1de91334f6/numpy-2.2.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:995f9e8181723852ca458e22de5d9b7d3ba4da3f11cc1cb113f093b271d7965a", size = 6928931 }, + { url = "https://files.pythonhosted.org/packages/47/a7/029354ab56edd43dd3f5efbfad292b8844f98b93174f322f82353fa46efa/numpy-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78ea78450fd96a498f50ee096f69c75379af5138f7881a51355ab0e11286c97", size = 14333157 }, + { url = "https://files.pythonhosted.org/packages/e3/d7/11fc594838d35c43519763310c316d4fd56f8600d3fc80a8e13e325b5c5c/numpy-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fbe72d347fbc59f94124125e73fc4976a06927ebc503ec5afbfb35f193cd957", size = 16381794 }, + { url = 
"https://files.pythonhosted.org/packages/af/d4/dd9b19cd4aff9c79d3f54d17f8be815407520d3116004bc574948336981b/numpy-2.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8e6da5cffbbe571f93588f562ed130ea63ee206d12851b60819512dd3e1ba50d", size = 15543990 }, + { url = "https://files.pythonhosted.org/packages/30/97/ab96b7650f27f684a9b1e46757a7294ecc50cab27701d05f146e9f779627/numpy-2.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09d6a2032faf25e8d0cadde7fd6145118ac55d2740132c1d845f98721b5ebcfd", size = 18170896 }, + { url = "https://files.pythonhosted.org/packages/81/9b/bae9618cab20db67a2ca9d711795cad29b2ca4b73034dd3b5d05b962070a/numpy-2.2.2-cp310-cp310-win32.whl", hash = "sha256:159ff6ee4c4a36a23fe01b7c3d07bd8c14cc433d9720f977fcd52c13c0098160", size = 6573458 }, + { url = "https://files.pythonhosted.org/packages/92/9b/95678092febd14070cfb7906ea7932e71e9dd5a6ab3ee948f9ed975e905d/numpy-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:64bd6e1762cd7f0986a740fee4dff927b9ec2c5e4d9a28d056eb17d332158014", size = 12915812 }, + { url = "https://files.pythonhosted.org/packages/21/67/32c68756eed84df181c06528ff57e09138f893c4653448c4967311e0f992/numpy-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:642199e98af1bd2b6aeb8ecf726972d238c9877b0f6e8221ee5ab945ec8a2189", size = 21220002 }, + { url = "https://files.pythonhosted.org/packages/3b/89/f43bcad18f2b2e5814457b1c7f7b0e671d0db12c8c0e43397ab8cb1831ed/numpy-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6d9fc9d812c81e6168b6d405bf00b8d6739a7f72ef22a9214c4241e0dc70b323", size = 14391215 }, + { url = "https://files.pythonhosted.org/packages/9c/e6/efb8cd6122bf25e86e3dd89d9dbfec9e6861c50e8810eed77d4be59b51c6/numpy-2.2.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c7d1fd447e33ee20c1f33f2c8e6634211124a9aabde3c617687d8b739aa69eac", size = 5391918 }, + { url = 
"https://files.pythonhosted.org/packages/47/e2/fccf89d64d9b47ffb242823d4e851fc9d36fa751908c9aac2807924d9b4e/numpy-2.2.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:451e854cfae0febe723077bd0cf0a4302a5d84ff25f0bfece8f29206c7bed02e", size = 6933133 }, + { url = "https://files.pythonhosted.org/packages/34/22/5ece749c0e5420a9380eef6fbf83d16a50010bd18fef77b9193d80a6760e/numpy-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd249bc894af67cbd8bad2c22e7cbcd46cf87ddfca1f1289d1e7e54868cc785c", size = 14338187 }, + { url = "https://files.pythonhosted.org/packages/5b/86/caec78829311f62afa6fa334c8dfcd79cffb4d24bcf96ee02ae4840d462b/numpy-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02935e2c3c0c6cbe9c7955a8efa8908dd4221d7755644c59d1bba28b94fd334f", size = 16393429 }, + { url = "https://files.pythonhosted.org/packages/c8/4e/0c25f74c88239a37924577d6ad780f3212a50f4b4b5f54f5e8c918d726bd/numpy-2.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a972cec723e0563aa0823ee2ab1df0cb196ed0778f173b381c871a03719d4826", size = 15559103 }, + { url = "https://files.pythonhosted.org/packages/d4/bd/d557f10fa50dc4d5871fb9606af563249b66af2fc6f99041a10e8757c6f1/numpy-2.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d6d6a0910c3b4368d89dde073e630882cdb266755565155bc33520283b2d9df8", size = 18182967 }, + { url = "https://files.pythonhosted.org/packages/30/e9/66cc0f66386d78ed89e45a56e2a1d051e177b6e04477c4a41cd590ef4017/numpy-2.2.2-cp311-cp311-win32.whl", hash = "sha256:860fd59990c37c3ef913c3ae390b3929d005243acca1a86facb0773e2d8d9e50", size = 6571499 }, + { url = "https://files.pythonhosted.org/packages/66/a3/4139296b481ae7304a43581046b8f0a20da6a0dfe0ee47a044cade796603/numpy-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:da1eeb460ecce8d5b8608826595c777728cdf28ce7b5a5a8c8ac8d949beadcf2", size = 12919805 }, + { url = 
"https://files.pythonhosted.org/packages/0c/e6/847d15770ab7a01e807bdfcd4ead5bdae57c0092b7dc83878171b6af97bb/numpy-2.2.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ac9bea18d6d58a995fac1b2cb4488e17eceeac413af014b1dd26170b766d8467", size = 20912636 }, + { url = "https://files.pythonhosted.org/packages/d1/af/f83580891577b13bd7e261416120e036d0d8fb508c8a43a73e38928b794b/numpy-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23ae9f0c2d889b7b2d88a3791f6c09e2ef827c2446f1c4a3e3e76328ee4afd9a", size = 14098403 }, + { url = "https://files.pythonhosted.org/packages/2b/86/d019fb60a9d0f1d4cf04b014fe88a9135090adfadcc31c1fadbb071d7fa7/numpy-2.2.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3074634ea4d6df66be04f6728ee1d173cfded75d002c75fac79503a880bf3825", size = 5128938 }, + { url = "https://files.pythonhosted.org/packages/7a/1b/50985edb6f1ec495a1c36452e860476f5b7ecdc3fc59ea89ccad3c4926c5/numpy-2.2.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ec0636d3f7d68520afc6ac2dc4b8341ddb725039de042faf0e311599f54eb37", size = 6661937 }, + { url = "https://files.pythonhosted.org/packages/f4/1b/17efd94cad1b9d605c3f8907fb06bcffc4ce4d1d14d46b95316cccccf2b9/numpy-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ffbb1acd69fdf8e89dd60ef6182ca90a743620957afb7066385a7bbe88dc748", size = 14049518 }, + { url = "https://files.pythonhosted.org/packages/5b/73/65d2f0b698df1731e851e3295eb29a5ab8aa06f763f7e4188647a809578d/numpy-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0349b025e15ea9d05c3d63f9657707a4e1d471128a3b1d876c095f328f8ff7f0", size = 16099146 }, + { url = "https://files.pythonhosted.org/packages/d5/69/308f55c0e19d4b5057b5df286c5433822e3c8039ede06d4051d96f1c2c4e/numpy-2.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:463247edcee4a5537841d5350bc87fe8e92d7dd0e8c71c995d2c6eecb8208278", size = 15246336 }, + { url = 
"https://files.pythonhosted.org/packages/f0/d8/d8d333ad0d8518d077a21aeea7b7c826eff766a2b1ce1194dea95ca0bacf/numpy-2.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9dd47ff0cb2a656ad69c38da850df3454da88ee9a6fde0ba79acceee0e79daba", size = 17863507 }, + { url = "https://files.pythonhosted.org/packages/82/6e/0b84ad3103ffc16d6673e63b5acbe7901b2af96c2837174c6318c98e27ab/numpy-2.2.2-cp312-cp312-win32.whl", hash = "sha256:4525b88c11906d5ab1b0ec1f290996c0020dd318af8b49acaa46f198b1ffc283", size = 6276491 }, + { url = "https://files.pythonhosted.org/packages/fc/84/7f801a42a67b9772a883223a0a1e12069a14626c81a732bd70aac57aebc1/numpy-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:5acea83b801e98541619af398cc0109ff48016955cc0818f478ee9ef1c5c3dcb", size = 12616372 }, + { url = "https://files.pythonhosted.org/packages/e1/fe/df5624001f4f5c3e0b78e9017bfab7fdc18a8d3b3d3161da3d64924dd659/numpy-2.2.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b208cfd4f5fe34e1535c08983a1a6803fdbc7a1e86cf13dd0c61de0b51a0aadc", size = 20899188 }, + { url = "https://files.pythonhosted.org/packages/a9/80/d349c3b5ed66bd3cb0214be60c27e32b90a506946857b866838adbe84040/numpy-2.2.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0bbe7dd86dca64854f4b6ce2ea5c60b51e36dfd597300057cf473d3615f2369", size = 14113972 }, + { url = "https://files.pythonhosted.org/packages/9d/50/949ec9cbb28c4b751edfa64503f0913cbfa8d795b4a251e7980f13a8a655/numpy-2.2.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:22ea3bb552ade325530e72a0c557cdf2dea8914d3a5e1fecf58fa5dbcc6f43cd", size = 5114294 }, + { url = "https://files.pythonhosted.org/packages/8d/f3/399c15629d5a0c68ef2aa7621d430b2be22034f01dd7f3c65a9c9666c445/numpy-2.2.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:128c41c085cab8a85dc29e66ed88c05613dccf6bc28b3866cd16050a2f5448be", size = 6648426 }, + { url = 
"https://files.pythonhosted.org/packages/2c/03/c72474c13772e30e1bc2e558cdffd9123c7872b731263d5648b5c49dd459/numpy-2.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:250c16b277e3b809ac20d1f590716597481061b514223c7badb7a0f9993c7f84", size = 14045990 }, + { url = "https://files.pythonhosted.org/packages/83/9c/96a9ab62274ffafb023f8ee08c88d3d31ee74ca58869f859db6845494fa6/numpy-2.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c8854b09bc4de7b041148d8550d3bd712b5c21ff6a8ed308085f190235d7ff", size = 16096614 }, + { url = "https://files.pythonhosted.org/packages/d5/34/cd0a735534c29bec7093544b3a509febc9b0df77718a9b41ffb0809c9f46/numpy-2.2.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b6fb9c32a91ec32a689ec6410def76443e3c750e7cfc3fb2206b985ffb2b85f0", size = 15242123 }, + { url = "https://files.pythonhosted.org/packages/5e/6d/541717a554a8f56fa75e91886d9b79ade2e595918690eb5d0d3dbd3accb9/numpy-2.2.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:57b4012e04cc12b78590a334907e01b3a85efb2107df2b8733ff1ed05fce71de", size = 17859160 }, + { url = "https://files.pythonhosted.org/packages/b9/a5/fbf1f2b54adab31510728edd06a05c1b30839f37cf8c9747cb85831aaf1b/numpy-2.2.2-cp313-cp313-win32.whl", hash = "sha256:4dbd80e453bd34bd003b16bd802fac70ad76bd463f81f0c518d1245b1c55e3d9", size = 6273337 }, + { url = "https://files.pythonhosted.org/packages/56/e5/01106b9291ef1d680f82bc47d0c5b5e26dfed15b0754928e8f856c82c881/numpy-2.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:5a8c863ceacae696aff37d1fd636121f1a512117652e5dfb86031c8d84836369", size = 12609010 }, + { url = "https://files.pythonhosted.org/packages/9f/30/f23d9876de0f08dceb707c4dcf7f8dd7588266745029debb12a3cdd40be6/numpy-2.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b3482cb7b3325faa5f6bc179649406058253d91ceda359c104dac0ad320e1391", size = 20924451 }, + { url = 
"https://files.pythonhosted.org/packages/6a/ec/6ea85b2da9d5dfa1dbb4cb3c76587fc8ddcae580cb1262303ab21c0926c4/numpy-2.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9491100aba630910489c1d0158034e1c9a6546f0b1340f716d522dc103788e39", size = 14122390 }, + { url = "https://files.pythonhosted.org/packages/68/05/bfbdf490414a7dbaf65b10c78bc243f312c4553234b6d91c94eb7c4b53c2/numpy-2.2.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:41184c416143defa34cc8eb9d070b0a5ba4f13a0fa96a709e20584638254b317", size = 5156590 }, + { url = "https://files.pythonhosted.org/packages/f7/ec/fe2e91b2642b9d6544518388a441bcd65c904cea38d9ff998e2e8ebf808e/numpy-2.2.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:7dca87ca328f5ea7dafc907c5ec100d187911f94825f8700caac0b3f4c384b49", size = 6671958 }, + { url = "https://files.pythonhosted.org/packages/b1/6f/6531a78e182f194d33ee17e59d67d03d0d5a1ce7f6be7343787828d1bd4a/numpy-2.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bc61b307655d1a7f9f4b043628b9f2b721e80839914ede634e3d485913e1fb2", size = 14019950 }, + { url = "https://files.pythonhosted.org/packages/e1/fb/13c58591d0b6294a08cc40fcc6b9552d239d773d520858ae27f39997f2ae/numpy-2.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fad446ad0bc886855ddf5909cbf8cb5d0faa637aaa6277fb4b19ade134ab3c7", size = 16079759 }, + { url = "https://files.pythonhosted.org/packages/2c/f2/f2f8edd62abb4b289f65a7f6d1f3650273af00b91b7267a2431be7f1aec6/numpy-2.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:149d1113ac15005652e8d0d3f6fd599360e1a708a4f98e43c9c77834a28238cb", size = 15226139 }, + { url = "https://files.pythonhosted.org/packages/aa/29/14a177f1a90b8ad8a592ca32124ac06af5eff32889874e53a308f850290f/numpy-2.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:106397dbbb1896f99e044efc90360d098b3335060375c26aa89c0d8a97c5f648", size = 17856316 }, + { url = 
"https://files.pythonhosted.org/packages/95/03/242ae8d7b97f4e0e4ab8dd51231465fb23ed5e802680d629149722e3faf1/numpy-2.2.2-cp313-cp313t-win32.whl", hash = "sha256:0eec19f8af947a61e968d5429f0bd92fec46d92b0008d0a6685b40d6adf8a4f4", size = 6329134 }, + { url = "https://files.pythonhosted.org/packages/80/94/cd9e9b04012c015cb6320ab3bf43bc615e248dddfeb163728e800a5d96f0/numpy-2.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:97b974d3ba0fb4612b77ed35d7627490e8e3dff56ab41454d9e8b23448940576", size = 12696208 }, + { url = "https://files.pythonhosted.org/packages/96/7e/1dd770ee68916ed358991ab62c2cc353ffd98d0b75b901d52183ca28e8bb/numpy-2.2.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b0531f0b0e07643eb089df4c509d30d72c9ef40defa53e41363eca8a8cc61495", size = 21047291 }, + { url = "https://files.pythonhosted.org/packages/d1/3c/ccd08578dc532a8e6927952339d4a02682b776d5e85be49ed0760308433e/numpy-2.2.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:e9e82dcb3f2ebbc8cb5ce1102d5f1c5ed236bf8a11730fb45ba82e2841ec21df", size = 6792494 }, + { url = "https://files.pythonhosted.org/packages/7c/28/8754b9aee4f97199f9a047f73bb644b5a2014994a6d7b061ba67134a42de/numpy-2.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d4142eb40ca6f94539e4db929410f2a46052a0fe7a2c1c59f6179c39938d2a", size = 16197312 }, + { url = "https://files.pythonhosted.org/packages/26/96/deb93f871f401045a684ca08a009382b247d14996d7a94fea6aa43c67b94/numpy-2.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:356ca982c188acbfa6af0d694284d8cf20e95b1c3d0aefa8929376fea9146f60", size = 12822674 }, +] + [[package]] name = "nvidia-cublas-cu12" version = "12.4.5.8" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/7f/7fbae15a3982dc9595e49ce0f19332423b260045d0a6afe93cdbe2f1f624/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", size = 363333771 }, { url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 }, ] @@ -976,7 +1088,6 @@ name = "nvidia-cuda-cupti-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/93/b5/9fb3d00386d3361b03874246190dfec7b206fd74e6e287b26a8fcb359d95/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", size = 12354556 }, { url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 }, ] @@ -985,7 +1096,6 @@ name = "nvidia-cuda-nvrtc-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/aa/083b01c427e963ad0b314040565ea396f914349914c298556484f799e61b/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", size = 24133372 }, { url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 }, ] @@ -994,7 +1104,6 @@ name = "nvidia-cuda-runtime-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/a1/aa/b656d755f474e2084971e9a297def515938d56b466ab39624012070cb773/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3", size = 894177 }, { url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 }, ] @@ -1017,7 +1126,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 }, { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 }, ] @@ -1026,7 +1134,6 @@ name = "nvidia-curand-cu12" version = "10.3.5.147" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/80/9c/a79180e4d70995fdf030c6946991d0171555c6edf95c265c6b2bf7011112/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", size = 56314811 }, { url = "https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 }, ] @@ -1040,7 +1147,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, 
] wheels = [ - { url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 }, { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 }, ] @@ -1052,10 +1158,17 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 }, { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 }, ] +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.6.2" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/a8/bcbb63b53a4b1234feeafb65544ee55495e1bb37ec31b999b963cbccfd1d/nvidia_cusparselt_cu12-0.6.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:df2c24502fd76ebafe7457dbc4716b2fec071aabaed4fb7691a201cde03704d9", size = 150057751 }, +] + [[package]] name = "nvidia-nccl-cu12" version = "2.21.5" @@ -1069,7 +1182,6 @@ name = "nvidia-nvjitlink-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/02/45/239d52c05074898a80a900f49b1615d81c07fceadd5ad6c4f86a987c0bc4/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", size = 20552510 }, { url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 }, ] @@ -1078,7 +1190,6 @@ name = "nvidia-nvtx-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/06/39/471f581edbb7804b39e8063d92fc8305bdc7a80ae5c07dbe6ea5c50d14a5/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3", size = 100417 }, { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 }, ] @@ -1093,25 +1204,16 @@ wheels = [ [[package]] name = "paramiko" -version = "3.5.0" +version = "3.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "bcrypt" }, { name = "cryptography" }, { name = "pynacl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1b/0f/c00296e36ff7485935b83d466c4f2cf5934b84b0ad14e81796e1d9d3609b/paramiko-3.5.0.tar.gz", hash = "sha256:ad11e540da4f55cedda52931f1a3f812a8238a7af7f62a60de538cd80bb28124", size = 1704305 } +sdist = { url = "https://files.pythonhosted.org/packages/7d/15/ad6ce226e8138315f2451c2aeea985bf35ee910afb477bae7477dc3a8f3b/paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822", size = 1566110 } wheels = [ 
- { url = "https://files.pythonhosted.org/packages/1f/66/14b2c030fcce69cba482d205c2d1462ca5c77303a263260dcb1192801c85/paramiko-3.5.0-py3-none-any.whl", hash = "sha256:1fedf06b085359051cd7d0d270cebe19e755a8a921cc2ddbfa647fb0cd7d68f9", size = 227143 }, -] - -[[package]] -name = "pkginfo" -version = "1.10.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2f/72/347ec5be4adc85c182ed2823d8d1c7b51e13b9a6b0c1aae59582eca652df/pkginfo-1.10.0.tar.gz", hash = "sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297", size = 378457 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/56/09/054aea9b7534a15ad38a363a2bd974c20646ab1582a387a95b8df1bfea1c/pkginfo-1.10.0-py3-none-any.whl", hash = "sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097", size = 30392 }, + { url = "https://files.pythonhosted.org/packages/15/f8/c7bd0ef12954a81a1d3cea60a13946bd9a49a0036a5927770c461eade7ae/paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61", size = 227298 }, ] [[package]] @@ -1143,11 +1245,11 @@ wheels = [ [[package]] name = "pygments" -version = "2.18.0" +version = "2.19.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } wheels = [ - { url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = 
"sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 }, + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, ] [[package]] @@ -1181,20 +1283,25 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.388" +version = "1.1.393" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9c/83/e9867538a794638d2d20ac3ab3106a31aca1d9cfea530c9b2921809dae03/pyright-1.1.388.tar.gz", hash = "sha256:0166d19b716b77fd2d9055de29f71d844874dbc6b9d3472ccd22df91db3dfa34", size = 21939 } +sdist = { url = "https://files.pythonhosted.org/packages/f4/c1/aede6c74e664ab103673e4f1b7fd3d058fef32276be5c43572f4067d4a8e/pyright-1.1.393.tar.gz", hash = "sha256:aeeb7ff4e0364775ef416a80111613f91a05c8e01e58ecfefc370ca0db7aed9c", size = 3790430 } wheels = [ - { url = "https://files.pythonhosted.org/packages/03/57/7fb00363b7f267a398c5bdf4f55f3e64f7c2076b2e7d2901b3373d52b6ff/pyright-1.1.388-py3-none-any.whl", hash = "sha256:c7068e9f2c23539c6ac35fc9efac6c6c1b9aa5a0ce97a9a8a6cf0090d7cbf84c", size = 18579 }, + { url = "https://files.pythonhosted.org/packages/92/47/f0dd0f8afce13d92e406421ecac6df0990daee84335fc36717678577d3e0/pyright-1.1.393-py3-none-any.whl", hash = "sha256:8320629bb7a44ca90944ba599390162bf59307f3d9fb6e27da3b7011b8c17ae5", size = 5646057 }, +] + +[package.optional-dependencies] +nodejs = [ + { name = "nodejs-wheel-binaries" }, ] [[package]] name = "pytest" -version = "8.3.3" +version = "8.3.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -1204,9 +1311,9 @@ dependencies = [ { name = "pluggy" }, { name = "tomli", marker = "python_full_version < '3.11'" 
}, ] -sdist = { url = "https://files.pythonhosted.org/packages/8b/6c/62bbd536103af674e227c41a8f3dcd022d591f6eed5facb5a0f31ee33bbc/pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", size = 1442487 } +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } wheels = [ - { url = "https://files.pythonhosted.org/packages/6b/77/7440a06a8ead44c7757a64362dd22df5760f9b12dc5f11b6188cd2fc27a0/pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2", size = 342341 }, + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, ] [[package]] @@ -1273,16 +1380,16 @@ wheels = [ [[package]] name = "readme-renderer" -version = "43.0" +version = "44.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docutils" }, { name = "nh3" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fe/b5/536c775084d239df6345dccf9b043419c7e3308bc31be4c7882196abc62e/readme_renderer-43.0.tar.gz", hash = "sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311", size = 31768 } +sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056 } wheels = [ - { url = "https://files.pythonhosted.org/packages/45/be/3ea20dc38b9db08387cf97997a85a7d51527ea2057d71118feb0aa8afa55/readme_renderer-43.0-py3-none-any.whl", hash = 
"sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9", size = 13301 }, + { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310 }, ] [[package]] @@ -1422,14 +1529,14 @@ wheels = [ [[package]] name = "ruamel-yaml" -version = "0.18.6" +version = "0.18.10" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ruamel-yaml-clib", marker = "python_full_version < '3.13' and platform_python_implementation == 'CPython'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/29/81/4dfc17eb6ebb1aac314a3eb863c1325b907863a1b8b1382cdffcb6ac0ed9/ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b", size = 143362 } +sdist = { url = "https://files.pythonhosted.org/packages/ea/46/f44d8be06b85bc7c4d8c95d658be2b68f27711f279bf9dd0612a5e4794f5/ruamel.yaml-0.18.10.tar.gz", hash = "sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58", size = 143447 } wheels = [ - { url = "https://files.pythonhosted.org/packages/73/67/8ece580cc363331d9a53055130f86b096bf16e38156e33b1d3014fffda6b/ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636", size = 117761 }, + { url = "https://files.pythonhosted.org/packages/c2/36/dfc1ebc0081e6d39924a2cc53654497f967a084a436bb64402dfce4254d9/ruamel.yaml-0.18.10-py3-none-any.whl", hash = "sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1", size = 117729 }, ] [[package]] @@ -1444,6 +1551,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7f/b7/20c6f3c0b656fe609675d69bc135c03aac9e3865912444be6339207b6648/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76", size = 686712 }, { url = "https://files.pythonhosted.org/packages/cd/11/d12dbf683471f888d354dac59593873c2b45feb193c5e3e0f2ebf85e68b9/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6", size = 663936 }, { url = "https://files.pythonhosted.org/packages/72/14/4c268f5077db5c83f743ee1daeb236269fa8577133a5cfa49f8b382baf13/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd", size = 696580 }, + { url = "https://files.pythonhosted.org/packages/30/fc/8cd12f189c6405a4c1cf37bd633aa740a9538c8e40497c231072d0fef5cf/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a", size = 663393 }, { url = "https://files.pythonhosted.org/packages/80/29/c0a017b704aaf3cbf704989785cd9c5d5b8ccec2dae6ac0c53833c84e677/ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da", size = 100326 }, { url = "https://files.pythonhosted.org/packages/3a/65/fa39d74db4e2d0cd252355732d966a460a41cd01c6353b820a0952432839/ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28", size = 118079 }, { url = "https://files.pythonhosted.org/packages/fb/8f/683c6ad562f558cbc4f7c029abcd9599148c51c54b5ef0f24f2638da9fbb/ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6", size = 132224 }, @@ -1452,6 +1560,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/86/29/88c2567bc893c84d88b4c48027367c3562ae69121d568e8a3f3a8d363f4d/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52", size = 703012 }, { url = "https://files.pythonhosted.org/packages/11/46/879763c619b5470820f0cd6ca97d134771e502776bc2b844d2adb6e37753/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642", size = 704352 }, { url = "https://files.pythonhosted.org/packages/02/80/ece7e6034256a4186bbe50dee28cd032d816974941a6abf6a9d65e4228a7/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2", size = 737344 }, + { url = "https://files.pythonhosted.org/packages/f0/ca/e4106ac7e80efbabdf4bf91d3d32fc424e41418458251712f5672eada9ce/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3", size = 714498 }, { url = "https://files.pythonhosted.org/packages/67/58/b1f60a1d591b771298ffa0428237afb092c7f29ae23bad93420b1eb10703/ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4", size = 100205 }, { url = "https://files.pythonhosted.org/packages/b4/4f/b52f634c9548a9291a70dfce26ca7ebce388235c93588a1068028ea23fcc/ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb", size = 118185 }, { url = "https://files.pythonhosted.org/packages/48/41/e7a405afbdc26af961678474a55373e1b323605a4f5e2ddd4a80ea80f628/ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632", size = 133433 }, @@ -1460,6 +1569,7 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/52/a9/d39f3c5ada0a3bb2870d7db41901125dbe2434fa4f12ca8c5b83a42d7c53/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd", size = 706497 }, { url = "https://files.pythonhosted.org/packages/b0/fa/097e38135dadd9ac25aecf2a54be17ddf6e4c23e43d538492a90ab3d71c6/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31", size = 698042 }, { url = "https://files.pythonhosted.org/packages/ec/d5/a659ca6f503b9379b930f13bc6b130c9f176469b73b9834296822a83a132/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680", size = 745831 }, + { url = "https://files.pythonhosted.org/packages/db/5d/36619b61ffa2429eeaefaab4f3374666adf36ad8ac6330d855848d7d36fd/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d", size = 715692 }, { url = "https://files.pythonhosted.org/packages/b1/82/85cb92f15a4231c89b95dfe08b09eb6adca929ef7df7e17ab59902b6f589/ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5", size = 98777 }, { url = "https://files.pythonhosted.org/packages/d7/8f/c3654f6f1ddb75daf3922c3d8fc6005b1ab56671ad56ffb874d908bfa668/ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4", size = 115523 }, { url = "https://files.pythonhosted.org/packages/29/00/4864119668d71a5fa45678f380b5923ff410701565821925c69780356ffa/ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a", size = 132011 }, @@ -1468,6 +1578,7 @@ wheels = [ { 
url = "https://files.pythonhosted.org/packages/e2/a9/28f60726d29dfc01b8decdb385de4ced2ced9faeb37a847bd5cf26836815/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6", size = 701785 }, { url = "https://files.pythonhosted.org/packages/84/7e/8e7ec45920daa7f76046578e4f677a3215fe8f18ee30a9cb7627a19d9b4c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf", size = 693017 }, { url = "https://files.pythonhosted.org/packages/c5/b3/d650eaade4ca225f02a648321e1ab835b9d361c60d51150bac49063b83fa/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1", size = 741270 }, + { url = "https://files.pythonhosted.org/packages/87/b8/01c29b924dcbbed75cc45b30c30d565d763b9c4d540545a0eeecffb8f09c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01", size = 709059 }, { url = "https://files.pythonhosted.org/packages/30/8c/ed73f047a73638257aa9377ad356bea4d96125b305c34a28766f4445cc0f/ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6", size = 98583 }, { url = "https://files.pythonhosted.org/packages/b0/85/e8e751d8791564dd333d5d9a4eab0a7a115f7e349595417fd50ecae3395c/ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3", size = 115190 }, { url = "https://files.pythonhosted.org/packages/e5/46/ccdef7a84ad745c37cb3d9a81790f28fbc9adf9c237dba682017b123294e/ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987", size = 131834 }, @@ -1476,113 +1587,56 @@ wheels = 
[ { url = "https://files.pythonhosted.org/packages/da/1c/23497017c554fc06ff5701b29355522cff850f626337fff35d9ab352cb18/ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7", size = 689072 }, { url = "https://files.pythonhosted.org/packages/68/e6/f3d4ff3223f9ea49c3b7169ec0268e42bd49f87c70c0e3e853895e4a7ae2/ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285", size = 667091 }, { url = "https://files.pythonhosted.org/packages/84/62/ead07043527642491e5011b143f44b81ef80f1025a96069b7210e0f2f0f3/ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed", size = 699111 }, + { url = "https://files.pythonhosted.org/packages/52/b3/fe4d84446f7e4887e3bea7ceff0a7df23790b5ed625f830e79ace88ebefb/ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7", size = 666365 }, { url = "https://files.pythonhosted.org/packages/6e/b3/7feb99a00bfaa5c6868617bb7651308afde85e5a0b23cd187fe5de65feeb/ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12", size = 100863 }, { url = "https://files.pythonhosted.org/packages/93/07/de635108684b7a5bb06e432b0930c5a04b6c59efe73bd966d8db3cc208f2/ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b", size = 118653 }, ] [[package]] name = "ruff" -version = "0.7.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4b/06/09d1276df977eece383d0ed66052fc24ec4550a61f8fbc0a11200e690496/ruff-0.7.3.tar.gz", hash = 
"sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313", size = 3243664 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/56/933d433c2489e4642487b835f53dd9ff015fb3d8fa459b09bb2ce42d7c4b/ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344", size = 10372090 }, - { url = "https://files.pythonhosted.org/packages/20/ea/1f0a22a6bcdd3fc26c73f63a025d05bd565901b729d56bcb093c722a6c4c/ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0", size = 10190037 }, - { url = "https://files.pythonhosted.org/packages/16/74/aca75666e0d481fe394e76a8647c44ea919087748024924baa1a17371e3e/ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9", size = 9811998 }, - { url = "https://files.pythonhosted.org/packages/20/a1/cf446a0d7f78ea1f0bd2b9171c11dfe746585c0c4a734b25966121eb4f5d/ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5", size = 10620626 }, - { url = "https://files.pythonhosted.org/packages/cd/c1/82b27d09286ae855f5d03b1ad37cf243f21eb0081732d4d7b0d658d439cb/ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299", size = 10177598 }, - { url = "https://files.pythonhosted.org/packages/b9/42/c0acac22753bf74013d035a5ef6c5c4c40ad4d6686bfb3fda7c6f37d9b37/ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e", size = 11171963 }, - { url = "https://files.pythonhosted.org/packages/43/18/bb0befb7fb9121dd9009e6a72eb98e24f1bacb07c6f3ecb55f032ba98aed/ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29", size = 11856157 }, - { url = "https://files.pythonhosted.org/packages/5e/91/04e98d7d6e32eca9d1372be595f9abc7b7f048795e32eb2edbd8794d50bd/ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5", size = 11440331 }, - { url = "https://files.pythonhosted.org/packages/f5/dc/3fe99f2ce10b76d389041a1b9f99e7066332e479435d4bebcceea16caff5/ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67", size = 12725354 }, - { url = "https://files.pythonhosted.org/packages/43/7b/1daa712de1c5bc6cbbf9fa60e9c41cc48cda962dc6d2c4f2a224d2c3007e/ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2", size = 11010091 }, - { url = "https://files.pythonhosted.org/packages/b6/db/1227a903587432eb569e57a95b15a4f191a71fe315cde4c0312df7bc85da/ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d", size = 10610687 }, - { url = "https://files.pythonhosted.org/packages/db/e2/dc41ee90c3085aadad4da614d310d834f641aaafddf3dfbba08210c616ce/ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2", size = 10254843 }, - { url = "https://files.pythonhosted.org/packages/6f/09/5f6cac1c91542bc5bd33d40b4c13b637bf64d7bb29e091dadb01b62527fe/ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2", size = 10730962 }, - { url = "https://files.pythonhosted.org/packages/d3/42/89a4b9a24ef7d00269e24086c417a006f9a3ffeac2c80f2629eb5ce140ee/ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16", size = 11101907 }, - { url = "https://files.pythonhosted.org/packages/b0/5c/efdb4777686683a8edce94ffd812783bddcd3d2454d38c5ac193fef7c500/ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc", size = 8611095 }, - { url = "https://files.pythonhosted.org/packages/bb/b8/28fbc6a4efa50178f973972d1c84b2d0a33cdc731588522ab751ac3da2f5/ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088", size = 9418283 }, - { url = "https://files.pythonhosted.org/packages/3f/77/b587cba6febd5e2003374f37eb89633f79f161e71084f94057c8653b7fb3/ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c", size = 8725228 }, +version = "0.9.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/74/6c359f6b9ed85b88df6ef31febce18faeb852f6c9855651dfb1184a46845/ruff-0.9.5.tar.gz", hash = "sha256:11aecd7a633932875ab3cb05a484c99970b9d52606ce9ea912b690b02653d56c", size = 3634177 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/4b/82b7c9ac874e72b82b19fd7eab57d122e2df44d2478d90825854f9232d02/ruff-0.9.5-py3-none-linux_armv6l.whl", hash = "sha256:d466d2abc05f39018d53f681fa1c0ffe9570e6d73cde1b65d23bb557c846f442", size = 11681264 }, + { url = "https://files.pythonhosted.org/packages/27/5c/f5ae0a9564e04108c132e1139d60491c0abc621397fe79a50b3dc0bd704b/ruff-0.9.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38840dbcef63948657fa7605ca363194d2fe8c26ce8f9ae12eee7f098c85ac8a", size = 11657554 }, + { url = "https://files.pythonhosted.org/packages/2a/83/c6926fa3ccb97cdb3c438bb56a490b395770c750bf59f9bc1fe57ae88264/ruff-0.9.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d56ba06da53536b575fbd2b56517f6f95774ff7be0f62c80b9e67430391eeb36", size = 11088959 }, + { url = 
"https://files.pythonhosted.org/packages/af/a7/42d1832b752fe969ffdbfcb1b4cb477cb271bed5835110fb0a16ef31ab81/ruff-0.9.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7cb2a01da08244c50b20ccfaeb5972e4228c3c3a1989d3ece2bc4b1f996001", size = 11902041 }, + { url = "https://files.pythonhosted.org/packages/53/cf/1fffa09fb518d646f560ccfba59f91b23c731e461d6a4dedd21a393a1ff1/ruff-0.9.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:96d5c76358419bc63a671caac70c18732d4fd0341646ecd01641ddda5c39ca0b", size = 11421069 }, + { url = "https://files.pythonhosted.org/packages/09/27/bb8f1b7304e2a9431f631ae7eadc35550fe0cf620a2a6a0fc4aa3d736f94/ruff-0.9.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:deb8304636ed394211f3a6d46c0e7d9535b016f53adaa8340139859b2359a070", size = 12625095 }, + { url = "https://files.pythonhosted.org/packages/d7/ce/ab00bc9d3df35a5f1b64f5117458160a009f93ae5caf65894ebb63a1842d/ruff-0.9.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df455000bf59e62b3e8c7ba5ed88a4a2bc64896f900f311dc23ff2dc38156440", size = 13257797 }, + { url = "https://files.pythonhosted.org/packages/88/81/c639a082ae6d8392bc52256058ec60f493c6a4d06d5505bccface3767e61/ruff-0.9.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de92170dfa50c32a2b8206a647949590e752aca8100a0f6b8cefa02ae29dce80", size = 12763793 }, + { url = "https://files.pythonhosted.org/packages/b3/d0/0a3d8f56d1e49af466dc770eeec5c125977ba9479af92e484b5b0251ce9c/ruff-0.9.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d28532d73b1f3f627ba88e1456f50748b37f3a345d2be76e4c653bec6c3e393", size = 14386234 }, + { url = "https://files.pythonhosted.org/packages/04/70/e59c192a3ad476355e7f45fb3a87326f5219cc7c472e6b040c6c6595c8f0/ruff-0.9.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c746d7d1df64f31d90503ece5cc34d7007c06751a7a3bbeee10e5f2463d52d2", size = 12437505 
}, + { url = "https://files.pythonhosted.org/packages/55/4e/3abba60a259d79c391713e7a6ccabf7e2c96e5e0a19100bc4204f1a43a51/ruff-0.9.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:11417521d6f2d121fda376f0d2169fb529976c544d653d1d6044f4c5562516ee", size = 11884799 }, + { url = "https://files.pythonhosted.org/packages/a3/db/b0183a01a9f25b4efcae919c18fb41d32f985676c917008620ad692b9d5f/ruff-0.9.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b9d71c3879eb32de700f2f6fac3d46566f644a91d3130119a6378f9312a38e1", size = 11527411 }, + { url = "https://files.pythonhosted.org/packages/0a/e4/3ebfcebca3dff1559a74c6becff76e0b64689cea02b7aab15b8b32ea245d/ruff-0.9.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2e36c61145e70febcb78483903c43444c6b9d40f6d2f800b5552fec6e4a7bb9a", size = 12078868 }, + { url = "https://files.pythonhosted.org/packages/ec/b2/5ab808833e06c0a1b0d046a51c06ec5687b73c78b116e8d77687dc0cd515/ruff-0.9.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:2f71d09aeba026c922aa7aa19a08d7bd27c867aedb2f74285a2639644c1c12f5", size = 12524374 }, + { url = "https://files.pythonhosted.org/packages/e0/51/1432afcc3b7aa6586c480142caae5323d59750925c3559688f2a9867343f/ruff-0.9.5-py3-none-win32.whl", hash = "sha256:134f958d52aa6fdec3b294b8ebe2320a950d10c041473c4316d2e7d7c2544723", size = 9853682 }, + { url = "https://files.pythonhosted.org/packages/b7/ad/c7a900591bd152bb47fc4882a27654ea55c7973e6d5d6396298ad3fd6638/ruff-0.9.5-py3-none-win_amd64.whl", hash = "sha256:78cc6067f6d80b6745b67498fb84e87d32c6fc34992b52bffefbdae3442967d6", size = 10865744 }, + { url = "https://files.pythonhosted.org/packages/75/d9/fde7610abd53c0c76b6af72fc679cb377b27c617ba704e25da834e0a0608/ruff-0.9.5-py3-none-win_arm64.whl", hash = "sha256:18a29f1a005bddb229e580795627d297dfa99f16b30c7039e73278cf6b5f9fa9", size = 10064595 }, ] [[package]] name = "safetensors" -version = "0.4.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/cb/46/a1c56ed856c6ac3b1a8b37abe5be0cac53219367af1331e721b04d122577/safetensors-0.4.5.tar.gz", hash = "sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310", size = 65702 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/38/10/0798ec2c8704c2d172620d8a3725bed92cdd75516357b1a3e64d4229ea4e/safetensors-0.4.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7", size = 392312 }, - { url = "https://files.pythonhosted.org/packages/2b/9e/9648d8dbb485c40a4a0212b7537626ae440b48156cc74601ca0b7a7615e0/safetensors-0.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27", size = 381858 }, - { url = "https://files.pythonhosted.org/packages/8b/67/49556aeacc00df353767ed31d68b492fecf38c3f664c52692e4d92aa0032/safetensors-0.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6885016f34bef80ea1085b7e99b3c1f92cb1be78a49839203060f67b40aee761", size = 441382 }, - { url = "https://files.pythonhosted.org/packages/5d/ce/e9f4869a37bb11229e6cdb4e73a6ef23b4f360eee9dca5f7e40982779704/safetensors-0.4.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:133620f443450429322f238fda74d512c4008621227fccf2f8cf4a76206fea7c", size = 439001 }, - { url = "https://files.pythonhosted.org/packages/a0/27/aee8cf031b89c34caf83194ec6b7f2eed28d053fff8b6da6d00c85c56035/safetensors-0.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fb3e0609ec12d2a77e882f07cced530b8262027f64b75d399f1504ffec0ba56", size = 478026 }, - { url = "https://files.pythonhosted.org/packages/da/33/1d9fc4805c623636e7d460f28eec92ebd1856f7a552df8eb78398a1ef4de/safetensors-0.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0f1dd769f064adc33831f5e97ad07babbd728427f98e3e1db6902e369122737", size = 495545 }, - { url = 
"https://files.pythonhosted.org/packages/b9/df/6f766b56690709d22e83836e4067a1109a7d84ea152a6deb5692743a2805/safetensors-0.4.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6d156bdb26732feada84f9388a9f135528c1ef5b05fae153da365ad4319c4c5", size = 435016 }, - { url = "https://files.pythonhosted.org/packages/90/fa/7bc3f18086201b1e55a42c88b822ae197d0158e12c54cd45c887305f1b7e/safetensors-0.4.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e347d77e2c77eb7624400ccd09bed69d35c0332f417ce8c048d404a096c593b", size = 456273 }, - { url = "https://files.pythonhosted.org/packages/3e/59/2ae50150d37a65c1c5f01aec74dc737707b8bbecdc76307e5a1a12c8a376/safetensors-0.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9f556eea3aec1d3d955403159fe2123ddd68e880f83954ee9b4a3f2e15e716b6", size = 619669 }, - { url = "https://files.pythonhosted.org/packages/fe/43/10f0bb597aef62c9c154152e265057089f3c729bdd980e6c32c3ec2407a4/safetensors-0.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9483f42be3b6bc8ff77dd67302de8ae411c4db39f7224dec66b0eb95822e4163", size = 605212 }, - { url = "https://files.pythonhosted.org/packages/7c/75/ede6887ea0ceaba55730988bfc7668dc147a8758f907fa6db26fbb681b8e/safetensors-0.4.5-cp310-none-win32.whl", hash = "sha256:7389129c03fadd1ccc37fd1ebbc773f2b031483b04700923c3511d2a939252cc", size = 272652 }, - { url = "https://files.pythonhosted.org/packages/ba/f0/919c72a9eef843781e652d0650f2819039943e69b69d5af2d0451a23edc3/safetensors-0.4.5-cp310-none-win_amd64.whl", hash = "sha256:e98ef5524f8b6620c8cdef97220c0b6a5c1cef69852fcd2f174bb96c2bb316b1", size = 285879 }, - { url = "https://files.pythonhosted.org/packages/9a/a5/25bcf75e373412daf1fd88045ab3aa8140a0d804ef0e70712c4f2c5b94d8/safetensors-0.4.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:21f848d7aebd5954f92538552d6d75f7c1b4500f51664078b5b49720d180e47c", size = 392256 }, - { url = 
"https://files.pythonhosted.org/packages/08/8c/ece3bf8756506a890bd980eca02f47f9d98dfbf5ce16eda1368f53560f67/safetensors-0.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb07000b19d41e35eecef9a454f31a8b4718a185293f0d0b1c4b61d6e4487971", size = 381490 }, - { url = "https://files.pythonhosted.org/packages/39/83/c4a7ce01d626e46ea2b45887f2e59b16441408031e2ce2f9fe01860c6946/safetensors-0.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09dedf7c2fda934ee68143202acff6e9e8eb0ddeeb4cfc24182bef999efa9f42", size = 441093 }, - { url = "https://files.pythonhosted.org/packages/47/26/cc52de647e71bd9a0b0d78ead0d31d9c462b35550a817aa9e0cab51d6db4/safetensors-0.4.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:59b77e4b7a708988d84f26de3ebead61ef1659c73dcbc9946c18f3b1786d2688", size = 438960 }, - { url = "https://files.pythonhosted.org/packages/06/78/332538546775ee97e749867df2d58f2282d9c48a1681e4891eed8b94ec94/safetensors-0.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d3bc83e14d67adc2e9387e511097f254bd1b43c3020440e708858c684cbac68", size = 478031 }, - { url = "https://files.pythonhosted.org/packages/d9/03/a3c8663f1ddda54e624ecf43fce651659b49e8e1603c52c3e464b442acfa/safetensors-0.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39371fc551c1072976073ab258c3119395294cf49cdc1f8476794627de3130df", size = 494754 }, - { url = "https://files.pythonhosted.org/packages/e6/ee/69e498a892f208bd1da4104d4b9be887f8611bf4942144718b6738482250/safetensors-0.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6c19feda32b931cae0acd42748a670bdf56bee6476a046af20181ad3fee4090", size = 435013 }, - { url = "https://files.pythonhosted.org/packages/a2/61/f0cfce984515b86d1260f556ba3b782158e2855e6a318446ac2613786fa9/safetensors-0.4.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:a659467495de201e2f282063808a41170448c78bada1e62707b07a27b05e6943", size = 455984 }, - { url = "https://files.pythonhosted.org/packages/e7/a9/3e3b48fcaade3eb4e347d39ebf0bd44291db21a3e4507854b42a7cb910ac/safetensors-0.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bad5e4b2476949bcd638a89f71b6916fa9a5cae5c1ae7eede337aca2100435c0", size = 619513 }, - { url = "https://files.pythonhosted.org/packages/80/23/2a7a1be24258c0e44c1d356896fd63dc0545a98d2d0184925fa09cd3ec76/safetensors-0.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a3a315a6d0054bc6889a17f5668a73f94f7fe55121ff59e0a199e3519c08565f", size = 604841 }, - { url = "https://files.pythonhosted.org/packages/b4/5c/34d082ff1fffffd8545fb22cbae3285ab4236f1f0cfc64b7e58261c2363b/safetensors-0.4.5-cp311-none-win32.whl", hash = "sha256:a01e232e6d3d5cf8b1667bc3b657a77bdab73f0743c26c1d3c5dd7ce86bd3a92", size = 272602 }, - { url = "https://files.pythonhosted.org/packages/6d/41/948c96c8a7e9fef57c2e051f1871c108a6dbbc6d285598bdb1d89b98617c/safetensors-0.4.5-cp311-none-win_amd64.whl", hash = "sha256:cbd39cae1ad3e3ef6f63a6f07296b080c951f24cec60188378e43d3713000c04", size = 285973 }, - { url = "https://files.pythonhosted.org/packages/bf/ac/5a63082f931e99200db95fd46fb6734f050bb6e96bf02521904c6518b7aa/safetensors-0.4.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e", size = 392015 }, - { url = "https://files.pythonhosted.org/packages/73/95/ab32aa6e9bdc832ff87784cdf9da26192b93de3ef82b8d1ada8f345c5044/safetensors-0.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e", size = 381774 }, - { url = "https://files.pythonhosted.org/packages/d6/6c/7e04b7626809fc63f3698f4c50e43aff2864b40089aa4506c918a75b8eed/safetensors-0.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f", size = 441134 
}, - { url = "https://files.pythonhosted.org/packages/58/2b/ffe7c86a277e6c1595fbdf415cfe2903f253f574a5405e93fda8baaa582c/safetensors-0.4.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461", size = 438467 }, - { url = "https://files.pythonhosted.org/packages/67/9c/f271bd804e08c7fda954d17b70ff281228a88077337a9e70feace4f4cc93/safetensors-0.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea", size = 476566 }, - { url = "https://files.pythonhosted.org/packages/4c/ad/4cf76a3e430a8a26108407fa6cb93e6f80d996a5cb75d9540c8fe3862990/safetensors-0.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed", size = 492253 }, - { url = "https://files.pythonhosted.org/packages/d9/40/a6f75ea449a9647423ec8b6f72c16998d35aa4b43cb38536ac060c5c7bf5/safetensors-0.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c", size = 434769 }, - { url = "https://files.pythonhosted.org/packages/52/47/d4b49b1231abf3131f7bb0bc60ebb94b27ee33e0a1f9569da05f8ac65dee/safetensors-0.4.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1", size = 457166 }, - { url = "https://files.pythonhosted.org/packages/c3/cd/006468b03b0fa42ff82d795d47c4193e99001e96c3f08bd62ef1b5cab586/safetensors-0.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4", size = 619280 }, - { url = "https://files.pythonhosted.org/packages/22/4d/b6208d918e83daa84b424c0ac3191ae61b44b3191613a3a5a7b38f94b8ad/safetensors-0.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646", size = 605390 }, - { url = "https://files.pythonhosted.org/packages/e8/20/bf0e01825dc01ed75538021a98b9a046e60ead63c6c6700764c821a8c873/safetensors-0.4.5-cp312-none-win32.whl", hash = "sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6", size = 273250 }, - { url = "https://files.pythonhosted.org/packages/f1/5f/ab6b6cec85b40789801f35b7d2fb579ae242d8193929974a106d5ff5c835/safetensors-0.4.5-cp312-none-win_amd64.whl", hash = "sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532", size = 286307 }, - { url = "https://files.pythonhosted.org/packages/90/61/0e27b1403e311cba0be20026bee4ee822d90eda7dad372179e7f18bb99f3/safetensors-0.4.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:25e5f8e2e92a74f05b4ca55686234c32aac19927903792b30ee6d7bd5653d54e", size = 392062 }, - { url = "https://files.pythonhosted.org/packages/b1/9f/cc31fafc9f5d79da10a83a820ca37f069bab0717895ad8cbcacf629dd1c5/safetensors-0.4.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:81efb124b58af39fcd684254c645e35692fea81c51627259cdf6d67ff4458916", size = 382517 }, - { url = "https://files.pythonhosted.org/packages/a4/c7/4fda8a0ebb96662550433378f4a74c677fa5fc4d0a43a7ec287d1df254a9/safetensors-0.4.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:585f1703a518b437f5103aa9cf70e9bd437cb78eea9c51024329e4fb8a3e3679", size = 441378 }, - { url = "https://files.pythonhosted.org/packages/14/31/9abb431f6209de9c80dab83e1112ebd769f1e32e7ab7ab228a02424a4693/safetensors-0.4.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b99fbf72e3faf0b2f5f16e5e3458b93b7d0a83984fe8d5364c60aa169f2da89", size = 438831 }, - { url = "https://files.pythonhosted.org/packages/37/37/99bfb195578a808b8d045159ee9264f8da58d017ac0701853dcacda14d4e/safetensors-0.4.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b17b299ca9966ca983ecda1c0791a3f07f9ca6ab5ded8ef3d283fff45f6bcd5f", size = 477112 }, - { url = "https://files.pythonhosted.org/packages/7d/05/fac3ef107e60d2a78532bed171a91669d4bb259e1236f5ea8c67a6976c75/safetensors-0.4.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76ded72f69209c9780fdb23ea89e56d35c54ae6abcdec67ccb22af8e696e449a", size = 493373 }, - { url = "https://files.pythonhosted.org/packages/cf/7a/825800ee8c68214b4fd3506d5e19209338c69b41e01c6e14dd13969cc8b9/safetensors-0.4.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2783956926303dcfeb1de91a4d1204cd4089ab441e622e7caee0642281109db3", size = 435422 }, - { url = "https://files.pythonhosted.org/packages/5e/6c/7a3233c08bde558d6c33a41219119866cb596139a4673cc6c24024710ffd/safetensors-0.4.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d94581aab8c6b204def4d7320f07534d6ee34cd4855688004a4354e63b639a35", size = 457382 }, - { url = "https://files.pythonhosted.org/packages/a0/58/0b7bcba3788ff503990cf9278d611b56c029400612ba93e772c987b5aa03/safetensors-0.4.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:67e1e7cb8678bb1b37ac48ec0df04faf689e2f4e9e81e566b5c63d9f23748523", size = 619301 }, - { url = "https://files.pythonhosted.org/packages/82/cc/9c2cf58611daf1c83ce5d37f9de66353e23fcda36008b13fd3409a760aa3/safetensors-0.4.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbd280b07e6054ea68b0cb4b16ad9703e7d63cd6890f577cb98acc5354780142", size = 605580 }, - { url = "https://files.pythonhosted.org/packages/78/a7/47e05af6b39964a98396d593fd164723e442871dcf55fff0202dfff50b3b/safetensors-0.4.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf727bb1281d66699bef5683b04d98c894a2803442c490a8d45cd365abfbdeb2", size = 393129 }, - { url = "https://files.pythonhosted.org/packages/a4/1e/643a04fa43e070da11e11c6defdf0930fb5216aa5e734fa00e238fd09ebb/safetensors-0.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:96f1d038c827cdc552d97e71f522e1049fef0542be575421f7684756a748e457", size = 383165 }, - { url = "https://files.pythonhosted.org/packages/08/94/7760694760f1e5001bd62c93155b8b7ccb652d1f4d0161d1e72b5bf9581a/safetensors-0.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:139fbee92570ecea774e6344fee908907db79646d00b12c535f66bc78bd5ea2c", size = 442391 }, - { url = "https://files.pythonhosted.org/packages/03/1c/0db6e6e5cb293907b2242447b48cc09f31478aa02f08773155c2a2db22de/safetensors-0.4.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c36302c1c69eebb383775a89645a32b9d266878fab619819ce660309d6176c9b", size = 440015 }, - { url = "https://files.pythonhosted.org/packages/15/58/9658bf7ca3a4e77577fbd2c7afda4701c558db66b01daf7cd4d9dbd9781e/safetensors-0.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d641f5b8149ea98deb5ffcf604d764aad1de38a8285f86771ce1abf8e74c4891", size = 478099 }, - { url = "https://files.pythonhosted.org/packages/9e/fa/44d9723a988dd54f43a5fcfa6b4d3a721e9294bb55d1c3e539a88619f1b2/safetensors-0.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4db6a61d968de73722b858038c616a1bebd4a86abe2688e46ca0cc2d17558f2", size = 497170 }, - { url = "https://files.pythonhosted.org/packages/5d/80/81ba44fc82afbf5ca553913ac49460e325dc5cf00c317b34c14d43ebd76b/safetensors-0.4.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b75a616e02f21b6f1d5785b20cecbab5e2bd3f6358a90e8925b813d557666ec1", size = 436076 }, - { url = "https://files.pythonhosted.org/packages/2e/ad/7880a359b0f93322689804bdbe1e9a3110652963478712933ff04a3d45c3/safetensors-0.4.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:788ee7d04cc0e0e7f944c52ff05f52a4415b312f5efd2ee66389fb7685ee030c", size = 456901 }, - { url = 
"https://files.pythonhosted.org/packages/89/4f/0b61e4add7ea9dfa8141d0bb1b8357e3a08730a020c3a287f0e889c386b5/safetensors-0.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:87bc42bd04fd9ca31396d3ca0433db0be1411b6b53ac5a32b7845a85d01ffc2e", size = 620159 }, - { url = "https://files.pythonhosted.org/packages/a9/60/544687daf8ce8dc9a74260992ac058d7e3f20c91eada5ca232898d005149/safetensors-0.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4037676c86365a721a8c9510323a51861d703b399b78a6b4486a54a65a975fca", size = 605993 }, - { url = "https://files.pythonhosted.org/packages/98/9a/2889d9df45ee09a02a17b3349c5649dc5516d1d167515b520e4aa79bdc5b/safetensors-0.4.5-cp39-none-win32.whl", hash = "sha256:1500418454529d0ed5c1564bda376c4ddff43f30fce9517d9bee7bcce5a8ef50", size = 272930 }, - { url = "https://files.pythonhosted.org/packages/ce/00/a4bdf45a5f2e1db08aaf95bb97f8ca30ec9568573eda03ec0db9ce5ed5d2/safetensors-0.4.5-cp39-none-win_amd64.whl", hash = "sha256:9d1a94b9d793ed8fe35ab6d5cea28d540a46559bafc6aae98f30ee0867000cab", size = 286065 }, - { url = "https://files.pythonhosted.org/packages/cf/ff/037ae4c0ee32db496669365e66079b6329906c6814722b159aa700e67208/safetensors-0.4.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdadf66b5a22ceb645d5435a0be7a0292ce59648ca1d46b352f13cff3ea80410", size = 392951 }, - { url = "https://files.pythonhosted.org/packages/f1/d6/6621e16b35bf83ae099eaab07338f04991a26c9aa43879d05f19f35e149c/safetensors-0.4.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d42ffd4c2259f31832cb17ff866c111684c87bd930892a1ba53fed28370c918c", size = 383417 }, - { url = "https://files.pythonhosted.org/packages/ae/88/3068e1bb16f5e9f9068901de3cf7b3db270b9bfe6e7d51d4b55c1da0425d/safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8a1f6d2063a92cd04145c7fd9e31a1c7d85fbec20113a14b487563fdbc0597", size = 442311 }, - { url = 
"https://files.pythonhosted.org/packages/f7/15/a2bb77ebbaa76b61ec2e9f731fe4db7f9473fd855d881957c51b3a168892/safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:951d2fcf1817f4fb0ef0b48f6696688a4e852a95922a042b3f96aaa67eedc920", size = 436678 }, - { url = "https://files.pythonhosted.org/packages/ec/79/9608c4546cdbfe3860dd7aa59e3562c9289113398b1a0bd89b68ce0a9d41/safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ac85d9a8c1af0e3132371d9f2d134695a06a96993c2e2f0bbe25debb9e3f67a", size = 457316 }, - { url = "https://files.pythonhosted.org/packages/0f/23/b17b483f2857835962ad33e38014efd4911791187e177bc23b057d35bee8/safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e3cec4a29eb7fe8da0b1c7988bc3828183080439dd559f720414450de076fcab", size = 620565 }, - { url = "https://files.pythonhosted.org/packages/19/46/5d11dc300feaad285c2f1bd784ff3f689f5e0ab6be49aaf568f3a77019eb/safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:21742b391b859e67b26c0b2ac37f52c9c0944a879a25ad2f9f9f3cd61e7fda8f", size = 606660 }, - { url = "https://files.pythonhosted.org/packages/5b/f9/539335e927cfeca8effc972d47e06155c4a39989905082c02b5c72769c41/safetensors-0.4.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f4beb84b6073b1247a773141a6331117e35d07134b3bb0383003f39971d414bb", size = 393986 }, - { url = "https://files.pythonhosted.org/packages/72/c6/988925bae113bb280642329fcbbfb502ba1bc9720b6be47c1f4c1fb7cc87/safetensors-0.4.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:68814d599d25ed2fdd045ed54d370d1d03cf35e02dce56de44c651f828fb9b7b", size = 384563 }, - { url = "https://files.pythonhosted.org/packages/b3/ff/b26d78b6100a08e57a1986ab71a2f9f093ba9943626f4967cd514cd43de2/safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b6453c54c57c1781292c46593f8a37254b8b99004c68d6c3ce229688931a22", size = 
442275 }, - { url = "https://files.pythonhosted.org/packages/71/29/6ac541358a07ec593ec9e88636908010bc9bf56c8018e0d25b4481adb64a/safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adaa9c6dead67e2dd90d634f89131e43162012479d86e25618e821a03d1eb1dc", size = 437217 }, - { url = "https://files.pythonhosted.org/packages/2b/f8/258564b71fe95d0117356e6915b1c0128f1ec3031cf8522a28f9d2108b47/safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73e7d408e9012cd17511b382b43547850969c7979efc2bc353f317abaf23c84c", size = 458132 }, - { url = "https://files.pythonhosted.org/packages/18/ac/510eebf3ac521fec3b0ea78e654e22d85de3406613209d20133b5b3cca33/safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:775409ce0fcc58b10773fdb4221ed1eb007de10fe7adbdf8f5e8a56096b6f0bc", size = 621171 }, - { url = "https://files.pythonhosted.org/packages/e0/c8/a02b635e39f3b904f52aff099505bdfbb40252d2d18a05e7fedc0bb64a28/safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:834001bed193e4440c4a3950a31059523ee5090605c907c66808664c932b549c", size = 607366 }, +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f4/4f/2ef9ef1766f8c194b01b67a63a444d2e557c8fe1d82faf3ebd85f370a917/safetensors-0.5.2.tar.gz", hash = "sha256:cb4a8d98ba12fa016f4241932b1fc5e702e5143f5374bba0bbcf7ddc1c4cf2b8", size = 66957 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/d1/017e31e75e274492a11a456a9e7c171f8f7911fe50735b4ec6ff37221220/safetensors-0.5.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:45b6092997ceb8aa3801693781a71a99909ab9cc776fbc3fa9322d29b1d3bef2", size = 427067 }, + { url = "https://files.pythonhosted.org/packages/24/84/e9d3ff57ae50dd0028f301c9ee064e5087fe8b00e55696677a0413c377a7/safetensors-0.5.2-cp38-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:6d0d6a8ee2215a440e1296b843edf44fd377b055ba350eaba74655a2fe2c4bae", size = 408856 }, + { url = "https://files.pythonhosted.org/packages/f1/1d/fe95f5dd73db16757b11915e8a5106337663182d0381811c81993e0014a9/safetensors-0.5.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86016d40bcaa3bcc9a56cd74d97e654b5f4f4abe42b038c71e4f00a089c4526c", size = 450088 }, + { url = "https://files.pythonhosted.org/packages/cf/21/e527961b12d5ab528c6e47b92d5f57f33563c28a972750b238b871924e49/safetensors-0.5.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:990833f70a5f9c7d3fc82c94507f03179930ff7d00941c287f73b6fcbf67f19e", size = 458966 }, + { url = "https://files.pythonhosted.org/packages/a5/8b/1a037d7a57f86837c0b41905040369aea7d8ca1ec4b2a77592372b2ec380/safetensors-0.5.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dfa7c2f3fe55db34eba90c29df94bcdac4821043fc391cb5d082d9922013869", size = 509915 }, + { url = "https://files.pythonhosted.org/packages/61/3d/03dd5cfd33839df0ee3f4581a20bd09c40246d169c0e4518f20b21d5f077/safetensors-0.5.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46ff2116150ae70a4e9c490d2ab6b6e1b1b93f25e520e540abe1b81b48560c3a", size = 527664 }, + { url = "https://files.pythonhosted.org/packages/c5/dc/8952caafa9a10a3c0f40fa86bacf3190ae7f55fa5eef87415b97b29cb97f/safetensors-0.5.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ab696dfdc060caffb61dbe4066b86419107a24c804a4e373ba59be699ebd8d5", size = 461978 }, + { url = "https://files.pythonhosted.org/packages/60/da/82de1fcf1194e3dbefd4faa92dc98b33c06bed5d67890e0962dd98e18287/safetensors-0.5.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03c937100f38c9ff4c1507abea9928a6a9b02c9c1c9c3609ed4fb2bf413d4975", size = 491253 }, + { url = 
"https://files.pythonhosted.org/packages/5a/9a/d90e273c25f90c3ba1b0196a972003786f04c39e302fbd6649325b1272bb/safetensors-0.5.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a00e737948791b94dad83cf0eafc09a02c4d8c2171a239e8c8572fe04e25960e", size = 628644 }, + { url = "https://files.pythonhosted.org/packages/70/3c/acb23e05aa34b4f5edd2e7f393f8e6480fbccd10601ab42cd03a57d4ab5f/safetensors-0.5.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:d3a06fae62418ec8e5c635b61a8086032c9e281f16c63c3af46a6efbab33156f", size = 721648 }, + { url = "https://files.pythonhosted.org/packages/71/45/eaa3dba5253a7c6931230dc961641455710ab231f8a89cb3c4c2af70f8c8/safetensors-0.5.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:1506e4c2eda1431099cebe9abf6c76853e95d0b7a95addceaa74c6019c65d8cf", size = 659588 }, + { url = "https://files.pythonhosted.org/packages/b0/71/2f9851164f821064d43b481ddbea0149c2d676c4f4e077b178e7eeaa6660/safetensors-0.5.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5c5b5d9da594f638a259fca766046f44c97244cc7ab8bef161b3e80d04becc76", size = 632533 }, + { url = "https://files.pythonhosted.org/packages/00/f1/5680e2ef61d9c61454fad82c344f0e40b8741a9dbd1e31484f0d31a9b1c3/safetensors-0.5.2-cp38-abi3-win32.whl", hash = "sha256:fe55c039d97090d1f85277d402954dd6ad27f63034fa81985a9cc59655ac3ee2", size = 291167 }, + { url = "https://files.pythonhosted.org/packages/86/ca/aa489392ec6fb59223ffce825461e1f811a3affd417121a2088be7a5758b/safetensors-0.5.2-cp38-abi3-win_amd64.whl", hash = "sha256:78abdddd03a406646107f973c7843276e7b64e5e32623529dc17f3d94a20f589", size = 303756 }, ] [[package]] @@ -1600,20 +1654,20 @@ wheels = [ [[package]] name = "setuptools" -version = "75.4.0" +version = "75.8.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e2/73/c1ccf3e057ef6331cc6861412905dc218203bde46dfe8262c1631aa7fb11/setuptools-75.4.0.tar.gz", hash = "sha256:1dc484f5cf56fd3fe7216d7b8df820802e7246cfb534a1db2aa64f14fcb9cdcb", 
size = 1336593 } +sdist = { url = "https://files.pythonhosted.org/packages/92/ec/089608b791d210aec4e7f97488e67ab0d33add3efccb83a056cbafe3a2a6/setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6", size = 1343222 } wheels = [ - { url = "https://files.pythonhosted.org/packages/21/df/7c6bb83dcb45b35dc35b310d752f254211cde0bcd2a35290ea6e2862b2a9/setuptools-75.4.0-py3-none-any.whl", hash = "sha256:b3c5d862f98500b06ffdf7cc4499b48c46c317d8d56cb30b5c8bce4d88f5c216", size = 1223131 }, + { url = "https://files.pythonhosted.org/packages/69/8a/b9dc7678803429e4a3bc9ba462fa3dd9066824d3c607490235c6a796be5a/setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3", size = 1228782 }, ] [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", size = 34041 } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254", size = 11053 }, + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, ] [[package]] @@ -1665,14 +1719,14 @@ wheels = [ [[package]] name = "sphinx-autodoc-typehints" 
-version = "1.23.0" +version = "2.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "sphinx" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/46/30/9764a2c735c655c3065f32072fb3d8c6fd5dda8df294d4e9f05670d60e31/sphinx_autodoc_typehints-1.23.0.tar.gz", hash = "sha256:5d44e2996633cdada499b6d27a496ddf9dbc95dd1f0f09f7b37940249e61f6e9", size = 35945 } +sdist = { url = "https://files.pythonhosted.org/packages/74/cd/03e7b917230dc057922130a79ba0240df1693bfd76727ea33fae84b39138/sphinx_autodoc_typehints-2.3.0.tar.gz", hash = "sha256:535c78ed2d6a1bad393ba9f3dfa2602cf424e2631ee207263e07874c38fde084", size = 40709 } wheels = [ - { url = "https://files.pythonhosted.org/packages/60/be/792b64ddacfcff362062077689ce37eb9750b9924fc0a14f623fa71ffaf6/sphinx_autodoc_typehints-1.23.0-py3-none-any.whl", hash = "sha256:ac099057e66b09e51b698058ba7dd76e57e1fe696cd91b54e121d3dad188f91d", size = 17896 }, + { url = "https://files.pythonhosted.org/packages/a0/f3/e0a4ce49da4b6f4e4ce84b3c39a0677831884cb9d8a87ccbf1e9e56e53ac/sphinx_autodoc_typehints-2.3.0-py3-none-any.whl", hash = "sha256:3098e2c6d0ba99eacd013eb06861acc9b51c6e595be86ab05c08ee5506ac0c67", size = 19836 }, ] [[package]] @@ -1870,142 +1924,117 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.20.3" +version = "0.21.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/da/25/b1681c1c30ea3ea6e584ae3fffd552430b12faa599b558c4c4783f56d7ff/tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539", size = 340513 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/51/421bb0052fc4333f7c1e3231d8c6607552933d919b628c8fabd06f60ba1e/tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4", size = 2674308 }, - { url = 
"https://files.pythonhosted.org/packages/a6/e9/f651f8d27614fd59af387f4dfa568b55207e5fac8d06eec106dc00b921c4/tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8", size = 2559363 }, - { url = "https://files.pythonhosted.org/packages/e3/e8/0e9f81a09ab79f409eabfd99391ca519e315496694671bebca24c3e90448/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514", size = 2892896 }, - { url = "https://files.pythonhosted.org/packages/b0/72/15fdbc149e05005e99431ecd471807db2241983deafe1e704020f608f40e/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481", size = 2802785 }, - { url = "https://files.pythonhosted.org/packages/26/44/1f8aea48f9bb117d966b7272484671b33a509f6217a8e8544d79442c90db/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141", size = 3086060 }, - { url = "https://files.pythonhosted.org/packages/2e/83/82ba40da99870b3a0b801cffaf4f099f088a84c7e07d32cc6ca751ce08e6/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b", size = 3096760 }, - { url = "https://files.pythonhosted.org/packages/f3/46/7a025404201d937f86548928616c0a164308aa3998e546efdf798bf5ee9c/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118", size = 3380165 }, - { url = "https://files.pythonhosted.org/packages/aa/49/15fae66ac62e49255eeedbb7f4127564b2c3f3aef2009913f525732d1a08/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1", size = 2994038 }, - { url = "https://files.pythonhosted.org/packages/f4/64/693afc9ba2393c2eed85c02bacb44762f06a29f0d1a5591fa5b40b39c0a2/tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b", size = 8977285 }, - { url = "https://files.pythonhosted.org/packages/be/7e/6126c18694310fe07970717929e889898767c41fbdd95b9078e8aec0f9ef/tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d", size = 9294890 }, - { url = "https://files.pythonhosted.org/packages/71/7d/5e3307a1091c8608a1e58043dff49521bc19553c6e9548c7fac6840cc2c4/tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f", size = 2196883 }, - { url = "https://files.pythonhosted.org/packages/47/62/aaf5b2a526b3b10c20985d9568ff8c8f27159345eaef3347831e78cd5894/tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c", size = 2381637 }, - { url = "https://files.pythonhosted.org/packages/c6/93/6742ef9206409d5ce1fdf44d5ca1687cdc3847ba0485424e2c731e6bcf67/tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90", size = 2674224 }, - { url = "https://files.pythonhosted.org/packages/aa/14/e75ece72e99f6ef9ae07777ca9fdd78608f69466a5cecf636e9bd2f25d5c/tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d", size = 2558991 }, - { url = "https://files.pythonhosted.org/packages/46/54/033b5b2ba0c3ae01e026c6f7ced147d41a2fa1c573d00a66cb97f6d7f9b3/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea", size = 
2892476 }, - { url = "https://files.pythonhosted.org/packages/e6/b0/cc369fb3297d61f3311cab523d16d48c869dc2f0ba32985dbf03ff811041/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9", size = 2802775 }, - { url = "https://files.pythonhosted.org/packages/1a/74/62ad983e8ea6a63e04ed9c5be0b605056bf8aac2f0125f9b5e0b3e2b89fa/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb", size = 3086138 }, - { url = "https://files.pythonhosted.org/packages/6b/ac/4637ba619db25094998523f9e6f5b456e1db1f8faa770a3d925d436db0c3/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1", size = 3098076 }, - { url = "https://files.pythonhosted.org/packages/58/ce/9793f2dc2ce529369807c9c74e42722b05034af411d60f5730b720388c7d/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da", size = 3379650 }, - { url = "https://files.pythonhosted.org/packages/50/f6/2841de926bc4118af996eaf0bdf0ea5b012245044766ffc0347e6c968e63/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907", size = 2994005 }, - { url = "https://files.pythonhosted.org/packages/a3/b2/00915c4fed08e9505d37cf6eaab45b12b4bff8f6719d459abcb9ead86a4b/tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a", size = 8977488 }, - { url = "https://files.pythonhosted.org/packages/e9/ac/1c069e7808181ff57bcf2d39e9b6fbee9133a55410e6ebdaa89f67c32e83/tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c", size = 9294935 }, - { url = "https://files.pythonhosted.org/packages/50/47/722feb70ee68d1c4412b12d0ea4acc2713179fd63f054913990f9e259492/tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442", size = 2197175 }, - { url = "https://files.pythonhosted.org/packages/75/68/1b4f928b15a36ed278332ac75d66d7eb65d865bf344d049c452c18447bf9/tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0", size = 2381616 }, - { url = "https://files.pythonhosted.org/packages/07/00/92a08af2a6b0c88c50f1ab47d7189e695722ad9714b0ee78ea5e1e2e1def/tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f", size = 2667951 }, - { url = "https://files.pythonhosted.org/packages/ec/9a/e17a352f0bffbf415cf7d73756f5c73a3219225fc5957bc2f39d52c61684/tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73", size = 2555167 }, - { url = "https://files.pythonhosted.org/packages/27/37/d108df55daf4f0fcf1f58554692ff71687c273d870a34693066f0847be96/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64", size = 2898389 }, - { url = "https://files.pythonhosted.org/packages/b2/27/32f29da16d28f59472fa7fb38e7782069748c7e9ab9854522db20341624c/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64", size = 2795866 }, - { url = "https://files.pythonhosted.org/packages/29/4e/8a9a3c89e128c4a40f247b501c10279d2d7ade685953407c4d94c8c0f7a7/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d", size = 3085446 }, - { url = "https://files.pythonhosted.org/packages/b4/3b/a2a7962c496ebcd95860ca99e423254f760f382cd4bd376f8895783afaf5/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f", size = 3094378 }, - { url = "https://files.pythonhosted.org/packages/1f/f4/a8a33f0192a1629a3bd0afcad17d4d221bbf9276da4b95d226364208d5eb/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f", size = 3385755 }, - { url = "https://files.pythonhosted.org/packages/9e/65/c83cb3545a65a9eaa2e13b22c93d5e00bd7624b354a44adbdc93d5d9bd91/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad", size = 2997679 }, - { url = "https://files.pythonhosted.org/packages/55/e9/a80d4e592307688a67c7c59ab77e03687b6a8bd92eb5db763a2c80f93f57/tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5", size = 8989296 }, - { url = "https://files.pythonhosted.org/packages/90/af/60c957af8d2244321124e893828f1a4817cde1a2d08d09d423b73f19bd2f/tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2", size = 9303621 }, - { url = "https://files.pythonhosted.org/packages/be/a9/96172310ee141009646d63a1ca267c099c462d747fe5ef7e33f74e27a683/tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c", size = 2188979 }, - { url = "https://files.pythonhosted.org/packages/bd/68/61d85ae7ae96dde7d0974ff3538db75d5cdc29be2e4329cd7fc51a283e22/tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = 
"sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2", size = 2380725 }, - { url = "https://files.pythonhosted.org/packages/07/19/36e9eaafb229616cb8502b42030fa7fe347550e76cb618de71b498fc3222/tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84", size = 2666813 }, - { url = "https://files.pythonhosted.org/packages/b9/c7/e2ce1d4f756c8a62ef93fdb4df877c2185339b6d63667b015bf70ea9d34b/tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6", size = 2555354 }, - { url = "https://files.pythonhosted.org/packages/7c/cf/5309c2d173a6a67f9ec8697d8e710ea32418de6fd8541778032c202a1c3e/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945", size = 2897745 }, - { url = "https://files.pythonhosted.org/packages/2c/e5/af3078e32f225e680e69d61f78855880edb8d53f5850a1834d519b2b103f/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c", size = 2794385 }, - { url = "https://files.pythonhosted.org/packages/0b/a7/bc421fe46650cc4eb4a913a236b88c243204f32c7480684d2f138925899e/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771", size = 3084580 }, - { url = "https://files.pythonhosted.org/packages/c6/22/97e1e95ee81f75922c9f569c23cb2b1fdc7f5a7a29c4c9fae17e63f751a6/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5", size = 3093581 }, - { url = 
"https://files.pythonhosted.org/packages/d5/14/f0df0ee3b9e516121e23c0099bccd7b9f086ba9150021a750e99b16ce56f/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1", size = 3385934 }, - { url = "https://files.pythonhosted.org/packages/66/52/7a171bd4929e3ffe61a29b4340fe5b73484709f92a8162a18946e124c34c/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0", size = 2997311 }, - { url = "https://files.pythonhosted.org/packages/7c/64/f1993bb8ebf775d56875ca0d50a50f2648bfbbb143da92fe2e6ceeb4abd5/tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797", size = 8988601 }, - { url = "https://files.pythonhosted.org/packages/d6/3f/49fa63422159bbc2f2a4ac5bfc597d04d4ec0ad3d2ef46649b5e9a340e37/tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01", size = 9303950 }, - { url = "https://files.pythonhosted.org/packages/66/11/79d91aeb2817ad1993ef61c690afe73e6dbedbfb21918b302ef5a2ba9bfb/tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13", size = 2188941 }, - { url = "https://files.pythonhosted.org/packages/c2/ff/ac8410f868fb8b14b5e619efa304aa119cb8a40bd7df29fc81a898e64f99/tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273", size = 2380269 }, - { url = "https://files.pythonhosted.org/packages/42/a8/ccc7be89a644aeba926a7c8779d659e856f4af4ee8fbdfb71a07f6a98a84/tokenizers-0.20.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:93e37f0269a11dc3b1a953f1fca9707f0929ebf8b4063c591c71a0664219988e", size = 2674607 }, - { url = 
"https://files.pythonhosted.org/packages/e7/8a/29388a69722188352f5f9006a392d692e4739688779475713e552ef3a1b3/tokenizers-0.20.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f4cb0c614b0135e781de96c2af87e73da0389ac1458e2a97562ed26e29490d8d", size = 2560100 }, - { url = "https://files.pythonhosted.org/packages/b0/39/073836c1d73e63268b1c67a682a8ba23e2688a43e737166be45ab8243701/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a", size = 2893676 }, - { url = "https://files.pythonhosted.org/packages/c1/d9/b9ff819c3df4bc73ad93629804f7b85321a78bc2da4f54fb774a90e995c6/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51", size = 2804173 }, - { url = "https://files.pythonhosted.org/packages/3e/d5/6b2b519ba2d9a6d3435f22918f0ad5850c40cf5357f6d989e6d68ef40fb9/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d", size = 3086866 }, - { url = "https://files.pythonhosted.org/packages/01/e1/d96e90ef872dd9b3a4b7a78874411f1c48476019f95a87a2cfd54c470a57/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4", size = 3099004 }, - { url = "https://files.pythonhosted.org/packages/0c/6a/a94248dc5915907e18d55c9739cd018f5aeb4146f198622f45f9748dcb9f/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0", size = 3381574 }, - { url = "https://files.pythonhosted.org/packages/29/9e/c95f8821d6bc93eba7c5db95e6299c009db523d1c646da8563b42ad892c4/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248", size = 2994953 }, - { url = "https://files.pythonhosted.org/packages/95/ff/01fdcf9a77776730baf63a9f66adf75c3aa4bdb1bdc77c7d1a3e03b2a25e/tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75", size = 8977698 }, - { url = "https://files.pythonhosted.org/packages/ef/2d/8b823741c64e9726b82076fa09f6d66285b61bd2c77e109871415b1ed9e2/tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921", size = 9295649 }, - { url = "https://files.pythonhosted.org/packages/7d/ed/06e4c10020f3c26faf62dcbe786d8dfad60ca119bb1f3e5f32dccd0ce9b4/tokenizers-0.20.3-cp39-none-win32.whl", hash = "sha256:401cc21ef642ee235985d747f65e18f639464d377c70836c9003df208d582064", size = 2197165 }, - { url = "https://files.pythonhosted.org/packages/0d/e3/ad08926d9a9dd238ec67d429db13f34db31bc4ecd726207fa95b90779462/tokenizers-0.20.3-cp39-none-win_amd64.whl", hash = "sha256:7498f3ea7746133335a6adb67a77cf77227a8b82c8483f644a2e5f86fea42b8d", size = 2382146 }, - { url = "https://files.pythonhosted.org/packages/29/cd/ff1586dd572aaf1637d59968df3f6f6532fa255f4638fbc29f6d27e0b690/tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c", size = 2672044 }, - { url = "https://files.pythonhosted.org/packages/b5/9e/7a2c00abbc8edb021ee0b1f12aab76a7b7824b49f94bcd9f075d0818d4b0/tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07", size = 2558841 }, - { url = "https://files.pythonhosted.org/packages/8e/c1/6af62ef61316f33ecf785bbb2bee4292f34ea62b491d4480ad9b09acf6b6/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df", size = 2897936 }, - { url = "https://files.pythonhosted.org/packages/9a/0b/c076b2ff3ee6dc70c805181fbe325668b89cfee856f8dfa24cc9aa293c84/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee", size = 3082688 }, - { url = "https://files.pythonhosted.org/packages/0a/60/56510124933136c2e90879e1c81603cfa753ae5a87830e3ef95056b20d8f/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5", size = 2998924 }, - { url = "https://files.pythonhosted.org/packages/68/60/4107b618b7b9155cb34ad2e0fc90946b7e71f041b642122fb6314f660688/tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b", size = 8989514 }, - { url = "https://files.pythonhosted.org/packages/e8/bd/48475818e614b73316baf37ac1e4e51b578bbdf58651812d7e55f43b88d8/tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd", size = 9303476 }, - { url = "https://files.pythonhosted.org/packages/55/ba/f0b0c5dd6a2eb4ac83fd890f1f6e402a8f245faeeca37b52b794fe738ed9/tokenizers-0.20.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:de082392a85eb0055cc055c535bff2f0cc15d7a000bdc36fbf601a0f3cf8507a", size = 2672725 }, - { url = "https://files.pythonhosted.org/packages/eb/6d/2d9f5a93f88470f8dae7b2069734ba0a5d30659761ce5a6067913e7d4333/tokenizers-0.20.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3db46cc0647bfd88263afdb739b92017a02a87ee30945cb3e86c7e25c7c9917", size = 2559213 }, - { url = 
"https://files.pythonhosted.org/packages/ce/32/37ff2ced2c169c2e7586fcd51314f59d02c60fd2eeafea527c2f9d1bb512/tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac", size = 2897613 }, - { url = "https://files.pythonhosted.org/packages/79/e4/fdd7ad2aedaa4a3f148aa28670bf0b0856211a3fec3e6554ed6ceec9a928/tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4", size = 3085434 }, - { url = "https://files.pythonhosted.org/packages/e0/b8/479ab7349faf1da001b861ea521055ad18a34a9b1053079e0c9b5c476f50/tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958", size = 2998651 }, - { url = "https://files.pythonhosted.org/packages/6b/7f/3a1d5ded5f841764d67aa4c6e2e4b40d9dac5fbd2df135bccc58284a6917/tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627", size = 8989010 }, - { url = "https://files.pythonhosted.org/packages/2b/a7/e0b5d5fea8cb69afdbab3c0e0cc3a02b5dd888ce0f933312f7c0ca6b017e/tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb", size = 9303287 }, +sdist = { url = "https://files.pythonhosted.org/packages/20/41/c2be10975ca37f6ec40d7abd7e98a5213bb04f284b869c1a24e6504fd94d/tokenizers-0.21.0.tar.gz", hash = "sha256:ee0894bf311b75b0c03079f33859ae4b2334d675d4e93f5a4132e1eae2834fe4", size = 343021 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/5c/8b09607b37e996dc47e70d6a7b6f4bdd4e4d5ab22fe49d7374565c7fefaf/tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2", size = 2647461 }, + { url = 
"https://files.pythonhosted.org/packages/22/7a/88e58bb297c22633ed1c9d16029316e5b5ac5ee44012164c2edede599a5e/tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e", size = 2563639 }, + { url = "https://files.pythonhosted.org/packages/f7/14/83429177c19364df27d22bc096d4c2e431e0ba43e56c525434f1f9b0fd00/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b177fb54c4702ef611de0c069d9169f0004233890e0c4c5bd5508ae05abf193", size = 2903304 }, + { url = "https://files.pythonhosted.org/packages/7e/db/3433eab42347e0dc5452d8fcc8da03f638c9accffefe5a7c78146666964a/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b43779a269f4629bebb114e19c3fca0223296ae9fea8bb9a7a6c6fb0657ff8e", size = 2804378 }, + { url = "https://files.pythonhosted.org/packages/57/8b/7da5e6f89736c2ade02816b4733983fca1c226b0c42980b1ae9dc8fcf5cc/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aeb255802be90acfd363626753fda0064a8df06031012fe7d52fd9a905eb00e", size = 3095488 }, + { url = "https://files.pythonhosted.org/packages/4d/f6/5ed6711093dc2c04a4e03f6461798b12669bc5a17c8be7cce1240e0b5ce8/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8b09dbeb7a8d73ee204a70f94fc06ea0f17dcf0844f16102b9f414f0b7463ba", size = 3121410 }, + { url = "https://files.pythonhosted.org/packages/81/42/07600892d48950c5e80505b81411044a2d969368cdc0d929b1c847bf6697/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:400832c0904f77ce87c40f1a8a27493071282f785724ae62144324f171377273", size = 3388821 }, + { url = "https://files.pythonhosted.org/packages/22/06/69d7ce374747edaf1695a4f61b83570d91cc8bbfc51ccfecf76f56ab4aac/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e84ca973b3a96894d1707e189c14a774b701596d579ffc7e69debfc036a61a04", size = 3008868 }, + { url = "https://files.pythonhosted.org/packages/c8/69/54a0aee4d576045b49a0eb8bffdc495634309c823bf886042e6f46b80058/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:eb7202d231b273c34ec67767378cd04c767e967fda12d4a9e36208a34e2f137e", size = 8975831 }, + { url = "https://files.pythonhosted.org/packages/f7/f3/b776061e4f3ebf2905ba1a25d90380aafd10c02d406437a8ba22d1724d76/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:089d56db6782a73a27fd8abf3ba21779f5b85d4a9f35e3b493c7bbcbbf0d539b", size = 8920746 }, + { url = "https://files.pythonhosted.org/packages/d8/ee/ce83d5ec8b6844ad4c3ecfe3333d58ecc1adc61f0878b323a15355bcab24/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:c87ca3dc48b9b1222d984b6b7490355a6fdb411a2d810f6f05977258400ddb74", size = 9161814 }, + { url = "https://files.pythonhosted.org/packages/18/07/3e88e65c0ed28fa93aa0c4d264988428eef3df2764c3126dc83e243cb36f/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4145505a973116f91bc3ac45988a92e618a6f83eb458f49ea0790df94ee243ff", size = 9357138 }, + { url = "https://files.pythonhosted.org/packages/15/b0/dc4572ca61555fc482ebc933f26cb407c6aceb3dc19c301c68184f8cad03/tokenizers-0.21.0-cp39-abi3-win32.whl", hash = "sha256:eb1702c2f27d25d9dd5b389cc1f2f51813e99f8ca30d9e25348db6585a97e24a", size = 2202266 }, + { url = "https://files.pythonhosted.org/packages/44/69/d21eb253fa91622da25585d362a874fa4710be600f0ea9446d8d0217cec1/tokenizers-0.21.0-cp39-abi3-win_amd64.whl", hash = "sha256:87841da5a25a3a5f70c102de371db120f41873b854ba65e52bccd57df5a3780c", size = 2389192 }, ] [[package]] name = "tomli" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1e/e4/1b6cbcc82d8832dd0ce34767d5c560df8a3547ad8cbc427f34601415930a/tomli-2.1.0.tar.gz", hash = 
"sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8", size = 16622 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/de/f7/4da0ffe1892122c9ea096c57f64c2753ae5dd3ce85488802d11b0992cc6d/tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391", size = 13750 }, +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = 
"https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = 
"https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = 
"https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = 
"https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, ] [[package]] name = "torch" -version = "2.5.1" +version = "2.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, { name = "fsspec" }, { name = "jinja2" }, - { name = "networkx" }, - { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name 
= "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = 
"nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, { name = "setuptools", marker = "python_full_version >= '3.12'" }, { name = "sympy" }, - { name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, { name = "typing-extensions" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/ef/834af4a885b31a0b32fff2d80e1e40f771e1566ea8ded55347502440786a/torch-2.5.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:71328e1bbe39d213b8721678f9dcac30dfc452a46d586f1d514a6aa0a99d4744", size = 906446312 }, - { url = "https://files.pythonhosted.org/packages/69/f0/46e74e0d145f43fa506cb336eaefb2d240547e4ce1f496e442711093ab25/torch-2.5.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:34bfa1a852e5714cbfa17f27c49d8ce35e1b7af5608c4bc6e81392c352dbc601", size = 91919522 }, - { url = "https://files.pythonhosted.org/packages/a5/13/1eb674c8efbd04d71e4a157ceba991904f633e009a584dd65dccbafbb648/torch-2.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:32a037bd98a241df6c93e4c789b683335da76a2ac142c0973675b715102dc5fa", size = 203088048 }, - { url = 
"https://files.pythonhosted.org/packages/a9/9d/e0860474ee0ff8f6ef2c50ec8f71a250f38d78a9b9df9fd241ad3397a65b/torch-2.5.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:23d062bf70776a3d04dbe74db950db2a5245e1ba4f27208a87f0d743b0d06e86", size = 63877046 }, - { url = "https://files.pythonhosted.org/packages/d1/35/e8b2daf02ce933e4518e6f5682c72fd0ed66c15910ea1fb4168f442b71c4/torch-2.5.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:de5b7d6740c4b636ef4db92be922f0edc425b65ed78c5076c43c42d362a45457", size = 906474467 }, - { url = "https://files.pythonhosted.org/packages/40/04/bd91593a4ca178ece93ca55f27e2783aa524aaccbfda66831d59a054c31e/torch-2.5.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:340ce0432cad0d37f5a31be666896e16788f1adf8ad7be481196b503dad675b9", size = 91919450 }, - { url = "https://files.pythonhosted.org/packages/0d/4a/e51420d46cfc90562e85af2fee912237c662ab31140ab179e49bd69401d6/torch-2.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:603c52d2fe06433c18b747d25f5c333f9c1d58615620578c326d66f258686f9a", size = 203098237 }, - { url = "https://files.pythonhosted.org/packages/d0/db/5d9cbfbc7968d79c5c09a0bc0bc3735da079f2fd07cc10498a62b320a480/torch-2.5.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:31f8c39660962f9ae4eeec995e3049b5492eb7360dd4f07377658ef4d728fa4c", size = 63884466 }, - { url = "https://files.pythonhosted.org/packages/8b/5c/36c114d120bfe10f9323ed35061bc5878cc74f3f594003854b0ea298942f/torch-2.5.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:ed231a4b3a5952177fafb661213d690a72caaad97d5824dd4fc17ab9e15cec03", size = 906389343 }, - { url = "https://files.pythonhosted.org/packages/6d/69/d8ada8b6e0a4257556d5b4ddeb4345ea8eeaaef3c98b60d1cca197c7ad8e/torch-2.5.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", size = 91811673 }, - { url = 
"https://files.pythonhosted.org/packages/5f/ba/607d013b55b9fd805db2a5c2662ec7551f1910b4eef39653eeaba182c5b2/torch-2.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", size = 203046841 }, - { url = "https://files.pythonhosted.org/packages/57/6c/bf52ff061da33deb9f94f4121fde7ff3058812cb7d2036c97bc167793bd1/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", size = 63858109 }, - { url = "https://files.pythonhosted.org/packages/69/72/20cb30f3b39a9face296491a86adb6ff8f1a47a897e4d14667e6cf89d5c3/torch-2.5.1-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", size = 906393265 }, - { url = "https://files.pythonhosted.org/packages/a9/18/81c399e8f4f1580d34bf99d827cb5fb5cf7a18a266bb5d30ca3ec2e89ba6/torch-2.5.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:1f3b7fb3cf7ab97fae52161423f81be8c6b8afac8d9760823fd623994581e1a3", size = 906479005 }, - { url = "https://files.pythonhosted.org/packages/5d/86/1c4b168d52cddb8d17952a7b5b25f69ef0da1fc34de1223d73d0d9db1801/torch-2.5.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:7974e3dce28b5a21fb554b73e1bc9072c25dde873fa00d54280861e7a009d7dc", size = 91846074 }, - { url = "https://files.pythonhosted.org/packages/76/49/4a0a8b19ce8f9bf32fcab4e863c7e2366f519f9826c84ca250567b11a014/torch-2.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:46c817d3ea33696ad3b9df5e774dba2257e9a4cd3c4a3afbf92f6bb13ac5ce2d", size = 203000888 }, - { url = "https://files.pythonhosted.org/packages/25/07/3548a7cfcf69d0eccec2ee79ee3913f1cdaadb27b36946774db86729ee47/torch-2.5.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:8046768b7f6d35b85d101b4b38cba8aa2f3cd51952bc4c06a49580f2ce682291", size = 63876023 }, + { url = 
"https://files.pythonhosted.org/packages/37/81/aa9ab58ec10264c1abe62c8b73f5086c3c558885d6beecebf699f0dbeaeb/torch-2.6.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:6860df13d9911ac158f4c44031609700e1eba07916fff62e21e6ffa0a9e01961", size = 766685561 }, + { url = "https://files.pythonhosted.org/packages/86/86/e661e229df2f5bfc6eab4c97deb1286d598bbeff31ab0cdb99b3c0d53c6f/torch-2.6.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c4f103a49830ce4c7561ef4434cc7926e5a5fe4e5eb100c19ab36ea1e2b634ab", size = 95751887 }, + { url = "https://files.pythonhosted.org/packages/20/e0/5cb2f8493571f0a5a7273cd7078f191ac252a402b5fb9cb6091f14879109/torch-2.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:56eeaf2ecac90da5d9e35f7f35eb286da82673ec3c582e310a8d1631a1c02341", size = 204165139 }, + { url = "https://files.pythonhosted.org/packages/e5/16/ea1b7842413a7b8a5aaa5e99e8eaf3da3183cc3ab345ad025a07ff636301/torch-2.6.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:09e06f9949e1a0518c5b09fe95295bc9661f219d9ecb6f9893e5123e10696628", size = 66520221 }, + { url = "https://files.pythonhosted.org/packages/78/a9/97cbbc97002fff0de394a2da2cdfa859481fdca36996d7bd845d50aa9d8d/torch-2.6.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:7979834102cd5b7a43cc64e87f2f3b14bd0e1458f06e9f88ffa386d07c7446e1", size = 766715424 }, + { url = "https://files.pythonhosted.org/packages/6d/fa/134ce8f8a7ea07f09588c9cc2cea0d69249efab977707cf67669431dcf5c/torch-2.6.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:ccbd0320411fe1a3b3fec7b4d3185aa7d0c52adac94480ab024b5c8f74a0bf1d", size = 95759416 }, + { url = "https://files.pythonhosted.org/packages/11/c5/2370d96b31eb1841c3a0883a492c15278a6718ccad61bb6a649c80d1d9eb/torch-2.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:46763dcb051180ce1ed23d1891d9b1598e07d051ce4c9d14307029809c4d64f7", size = 204164970 }, + { url = 
"https://files.pythonhosted.org/packages/0b/fa/f33a4148c6fb46ca2a3f8de39c24d473822d5774d652b66ed9b1214da5f7/torch-2.6.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:94fc63b3b4bedd327af588696559f68c264440e2503cc9e6954019473d74ae21", size = 66530713 }, + { url = "https://files.pythonhosted.org/packages/e5/35/0c52d708144c2deb595cd22819a609f78fdd699b95ff6f0ebcd456e3c7c1/torch-2.6.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:2bb8987f3bb1ef2675897034402373ddfc8f5ef0e156e2d8cfc47cacafdda4a9", size = 766624563 }, + { url = "https://files.pythonhosted.org/packages/01/d6/455ab3fbb2c61c71c8842753b566012e1ed111e7a4c82e0e1c20d0c76b62/torch-2.6.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:b789069020c5588c70d5c2158ac0aa23fd24a028f34a8b4fcb8fcb4d7efcf5fb", size = 95607867 }, + { url = "https://files.pythonhosted.org/packages/18/cf/ae99bd066571656185be0d88ee70abc58467b76f2f7c8bfeb48735a71fe6/torch-2.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7e1448426d0ba3620408218b50aa6ada88aeae34f7a239ba5431f6c8774b1239", size = 204120469 }, + { url = "https://files.pythonhosted.org/packages/81/b4/605ae4173aa37fb5aa14605d100ff31f4f5d49f617928c9f486bb3aaec08/torch-2.6.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:9a610afe216a85a8b9bc9f8365ed561535c93e804c2a317ef7fabcc5deda0989", size = 66532538 }, + { url = "https://files.pythonhosted.org/packages/24/85/ead1349fc30fe5a32cadd947c91bda4a62fbfd7f8c34ee61f6398d38fb48/torch-2.6.0-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:4874a73507a300a5d089ceaff616a569e7bb7c613c56f37f63ec3ffac65259cf", size = 766626191 }, + { url = "https://files.pythonhosted.org/packages/dd/b0/26f06f9428b250d856f6d512413e9e800b78625f63801cbba13957432036/torch-2.6.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:a0d5e1b9874c1a6c25556840ab8920569a7a4137afa8a63a32cee0bc7d89bd4b", size = 95611439 }, + { url = 
"https://files.pythonhosted.org/packages/c2/9c/fc5224e9770c83faed3a087112d73147cd7c7bfb7557dcf9ad87e1dda163/torch-2.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:510c73251bee9ba02ae1cb6c9d4ee0907b3ce6020e62784e2d7598e0cfa4d6cc", size = 204126475 }, + { url = "https://files.pythonhosted.org/packages/88/8b/d60c0491ab63634763be1537ad488694d316ddc4a20eaadd639cedc53971/torch-2.6.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:ff96f4038f8af9f7ec4231710ed4549da1bdebad95923953a25045dcf6fd87e2", size = 66536783 }, + { url = "https://files.pythonhosted.org/packages/40/bb/feb5644baa621fd8e1e88bf51f6fa38ab3f985d472a764144ff4867ac1d6/torch-2.6.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:9ea955317cfcd3852b1402b62af258ce735c2edeee42ca9419b6bc889e5ae053", size = 766680961 }, + { url = "https://files.pythonhosted.org/packages/ee/11/08774a8198a33263947c59e04b8a0bf85a61a44e82100c46cf833bbce35e/torch-2.6.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bb2c6c3e65049f081940f5ab15c9136c7de40d3f01192541c920a07c7c585b7e", size = 95782656 }, + { url = "https://files.pythonhosted.org/packages/c1/0d/56fb07032accbfebb4555638b6002ec5678d0942da85497e40f9405ab756/torch-2.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:683410f97984103148e31b38a8631acf31c3034c020c0f4d26171e7626d8317a", size = 204061417 }, + { url = "https://files.pythonhosted.org/packages/b3/17/41f681b87290a1d2f1394f943e470f8b0b3c2987b7df8dc078d8831fce5b/torch-2.6.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:265f70de5fd45b864d924b64be1797f86e76c8e48a02c2a3a6fc7ec247d2226c", size = 66520446 }, ] [[package]] @@ -2015,14 +2044,15 @@ source = { editable = "." 
} dependencies = [ { name = "cloudpickle" }, { name = "fabric" }, - { name = "numpy" }, + { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "numpy", version = "2.2.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "torch" }, ] [package.dev-dependencies] dev = [ { name = "build" }, - { name = "pyright" }, + { name = "pyright", extra = ["nodejs"] }, { name = "pytest" }, { name = "ruff" }, { name = "twine" }, @@ -2050,7 +2080,7 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ { name = "build" }, - { name = "pyright" }, + { name = "pyright", extras = ["nodejs"] }, { name = "pytest" }, { name = "ruff" }, { name = "twine" }, @@ -2069,24 +2099,25 @@ docs = [ [[package]] name = "tqdm" -version = "4.67.0" +version = "4.67.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "platform_system == 'Windows'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e8/4f/0153c21dc5779a49a0598c445b1978126b1344bab9ee71e53e44877e14e0/tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a", size = 169739 } +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } wheels = [ - { url = "https://files.pythonhosted.org/packages/2b/78/57043611a16c655c8350b4c01b8d6abfb38cc2acb475238b62c2146186d7/tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be", size = 78590 }, + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = 
"sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 }, ] [[package]] name = "transformers" -version = "4.46.2" +version = "4.48.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, { name = "huggingface-hub" }, - { name = "numpy" }, + { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "numpy", version = "2.2.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "packaging" }, { name = "pyyaml" }, { name = "regex" }, @@ -2095,33 +2126,32 @@ dependencies = [ { name = "tokenizers" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/05/6f/8f964f61983e3989c8ff23b5c21464807c6bc6236f36cdd41108222556d9/transformers-4.46.2.tar.gz", hash = "sha256:3d85410881e1c074be767877bf33c83231ec11529f274a6044ecb20c157ba14e", size = 8611717 } +sdist = { url = "https://files.pythonhosted.org/packages/e3/82/cebeb7af5e64440f1638f18c4ed0f89156d0eeaa6290d98da8ca93ac3872/transformers-4.48.3.tar.gz", hash = "sha256:a5e8f1e9a6430aa78215836be70cecd3f872d99eeda300f41ad6cc841724afdb", size = 8373458 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ed/ad/c9b96572ab7994e73c64588f8875741823f2daba70e746547fff9a2d9a54/transformers-4.46.2-py3-none-any.whl", hash = "sha256:c921f4406b78e6518c97b618c5acd1cf8a4f2315b6b727f4bf9e01496eef849c", size = 10034514 }, + { url = "https://files.pythonhosted.org/packages/b6/1a/efeecb8d83705f2f4beac98d46f2148c95ecd7babfb31b5c0f1e7017e83d/transformers-4.48.3-py3-none-any.whl", hash = "sha256:78697f990f5ef350c23b46bf86d5081ce96b49479ab180b2de7687267de8fd36", size = 9669412 }, ] [[package]] name = "triton" -version = "3.1.0" +version = "3.2.0" source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "filelock", marker = "python_full_version < '3.13'" }, -] wheels = [ - { url = 
"https://files.pythonhosted.org/packages/98/29/69aa56dc0b2eb2602b553881e34243475ea2afd9699be042316842788ff5/triton-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8", size = 209460013 }, - { url = "https://files.pythonhosted.org/packages/86/17/d9a5cf4fcf46291856d1e90762e36cbabd2a56c7265da0d1d9508c8e3943/triton-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f34f6e7885d1bf0eaaf7ba875a5f0ce6f3c13ba98f9503651c1e6dc6757ed5c", size = 209506424 }, - { url = "https://files.pythonhosted.org/packages/78/eb/65f5ba83c2a123f6498a3097746607e5b2f16add29e36765305e4ac7fdd8/triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8182f42fd8080a7d39d666814fa36c5e30cc00ea7eeeb1a2983dbb4c99a0fdc", size = 209551444 }, - { url = "https://files.pythonhosted.org/packages/c4/69/57e0fed438d547524e08bfedc587078314176ad1c15c8be904d3f03149ec/triton-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aafa9a20cd0d9fee523cd4504aa7131807a864cd77dcf6efe7e981f18b8c6c11", size = 209460480 }, + { url = "https://files.pythonhosted.org/packages/01/65/3ffa90e158a2c82f0716eee8d26a725d241549b7d7aaf7e4f44ac03ebd89/triton-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3e54983cd51875855da7c68ec05c05cf8bb08df361b1d5b69e05e40b0c9bd62", size = 253090354 }, + { url = "https://files.pythonhosted.org/packages/a7/2e/757d2280d4fefe7d33af7615124e7e298ae7b8e3bc4446cdb8e88b0f9bab/triton-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8009a1fb093ee8546495e96731336a33fb8856a38e45bb4ab6affd6dbc3ba220", size = 253157636 }, + { url = "https://files.pythonhosted.org/packages/06/00/59500052cb1cf8cf5316be93598946bc451f14072c6ff256904428eaf03c/triton-3.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8d9b215efc1c26fa7eefb9a157915c92d52e000d2bf83e5f69704047e63f125c", size = 253159365 }, + { url = "https://files.pythonhosted.org/packages/c7/30/37a3384d1e2e9320331baca41e835e90a3767303642c7a80d4510152cbcf/triton-3.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5dfa23ba84541d7c0a531dfce76d8bcd19159d50a4a8b14ad01e91734a5c1b0", size = 253154278 }, + { url = "https://files.pythonhosted.org/packages/bc/74/9f12bdedeb110242d8bb1bd621f6605e753ee0cbf73cf7f3a62b8173f190/triton-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ceed0eff2c4a73b14eb63e052992f44bbdf175f3fad21e1ac8097a772de7ee", size = 253057866 }, ] [[package]] name = "twine" -version = "5.1.1" +version = "6.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "importlib-metadata" }, - { name = "keyring" }, - { name = "pkginfo" }, + { name = "id" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "keyring", marker = "platform_machine != 'ppc64le' and platform_machine != 's390x'" }, + { name = "packaging" }, { name = "readme-renderer" }, { name = "requests" }, { name = "requests-toolbelt" }, @@ -2129,9 +2159,9 @@ dependencies = [ { name = "rich" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/77/68/bd982e5e949ef8334e6f7dcf76ae40922a8750aa2e347291ae1477a4782b/twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db", size = 225531 } +sdist = { url = "https://files.pythonhosted.org/packages/c8/a2/6df94fc5c8e2170d21d7134a565c3a8fb84f9797c1dd65a5976aaf714418/twine-6.1.0.tar.gz", hash = "sha256:be324f6272eff91d07ee93f251edf232fc647935dd585ac003539b42404a8dbd", size = 168404 } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/ec/00f9d5fd040ae29867355e559a94e9a8429225a0284a3f5f091a3878bfc0/twine-5.1.1-py3-none-any.whl", hash = 
"sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997", size = 38650 }, + { url = "https://files.pythonhosted.org/packages/7c/b6/74e927715a285743351233f33ea3c684528a0d374d2e43ff9ce9585b73fe/twine-6.1.0-py3-none-any.whl", hash = "sha256:a47f973caf122930bf0fbbf17f80b83bc1602c9ce393c7845f289a3001dc5384", size = 40791 }, ] [[package]] @@ -2145,11 +2175,11 @@ wheels = [ [[package]] name = "urllib3" -version = "2.2.3" +version = "2.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 } +sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, ] [[package]] @@ -2163,51 +2193,77 @@ wheels = [ [[package]] name = "wrapt" -version = "1.16.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/4c/063a912e20bcef7124e0df97282a8af3ff3e4b603ce84c481d6d7346be0a/wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d", size = 53972 } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/a8/c6/5375258add3777494671d8cec27cdf5402abd91016dee24aa2972c61fedf/wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4", size = 37315 }, - { url = "https://files.pythonhosted.org/packages/32/12/e11adfde33444986135d8881b401e4de6cbb4cced046edc6b464e6ad7547/wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020", size = 38160 }, - { url = "https://files.pythonhosted.org/packages/70/7d/3dcc4a7e96f8d3e398450ec7703db384413f79bd6c0196e0e139055ce00f/wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440", size = 80419 }, - { url = "https://files.pythonhosted.org/packages/d1/c4/8dfdc3c2f0b38be85c8d9fdf0011ebad2f54e40897f9549a356bebb63a97/wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487", size = 72669 }, - { url = "https://files.pythonhosted.org/packages/49/83/b40bc1ad04a868b5b5bcec86349f06c1ee1ea7afe51dc3e46131e4f39308/wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf", size = 80271 }, - { url = "https://files.pythonhosted.org/packages/19/d4/cd33d3a82df73a064c9b6401d14f346e1d2fb372885f0295516ec08ed2ee/wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72", size = 84748 }, - { url = "https://files.pythonhosted.org/packages/ef/58/2fde309415b5fa98fd8f5f4a11886cbf276824c4c64d45a39da342fff6fe/wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0", size = 77522 }, - 
{ url = "https://files.pythonhosted.org/packages/07/44/359e4724a92369b88dbf09878a7cde7393cf3da885567ea898e5904049a3/wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136", size = 84780 }, - { url = "https://files.pythonhosted.org/packages/88/8f/706f2fee019360cc1da652353330350c76aa5746b4e191082e45d6838faf/wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d", size = 35335 }, - { url = "https://files.pythonhosted.org/packages/19/2b/548d23362e3002ebbfaefe649b833fa43f6ca37ac3e95472130c4b69e0b4/wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2", size = 37528 }, - { url = "https://files.pythonhosted.org/packages/fd/03/c188ac517f402775b90d6f312955a5e53b866c964b32119f2ed76315697e/wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09", size = 37313 }, - { url = "https://files.pythonhosted.org/packages/0f/16/ea627d7817394db04518f62934a5de59874b587b792300991b3c347ff5e0/wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d", size = 38164 }, - { url = "https://files.pythonhosted.org/packages/7f/a7/f1212ba098f3de0fd244e2de0f8791ad2539c03bef6c05a9fcb03e45b089/wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389", size = 80890 }, - { url = "https://files.pythonhosted.org/packages/b7/96/bb5e08b3d6db003c9ab219c487714c13a237ee7dcc572a555eaf1ce7dc82/wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060", size = 73118 }, - { url = 
"https://files.pythonhosted.org/packages/6e/52/2da48b35193e39ac53cfb141467d9f259851522d0e8c87153f0ba4205fb1/wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1", size = 80746 }, - { url = "https://files.pythonhosted.org/packages/11/fb/18ec40265ab81c0e82a934de04596b6ce972c27ba2592c8b53d5585e6bcd/wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3", size = 85668 }, - { url = "https://files.pythonhosted.org/packages/0f/ef/0ecb1fa23145560431b970418dce575cfaec555ab08617d82eb92afc7ccf/wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956", size = 78556 }, - { url = "https://files.pythonhosted.org/packages/25/62/cd284b2b747f175b5a96cbd8092b32e7369edab0644c45784871528eb852/wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d", size = 85712 }, - { url = "https://files.pythonhosted.org/packages/e5/a7/47b7ff74fbadf81b696872d5ba504966591a3468f1bc86bca2f407baef68/wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362", size = 35327 }, - { url = "https://files.pythonhosted.org/packages/cf/c3/0084351951d9579ae83a3d9e38c140371e4c6b038136909235079f2e6e78/wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89", size = 37523 }, - { url = "https://files.pythonhosted.org/packages/92/17/224132494c1e23521868cdd57cd1e903f3b6a7ba6996b7b8f077ff8ac7fe/wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b", size = 37614 }, - { url = 
"https://files.pythonhosted.org/packages/6a/d7/cfcd73e8f4858079ac59d9db1ec5a1349bc486ae8e9ba55698cc1f4a1dff/wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36", size = 38316 }, - { url = "https://files.pythonhosted.org/packages/7e/79/5ff0a5c54bda5aec75b36453d06be4f83d5cd4932cc84b7cb2b52cee23e2/wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73", size = 86322 }, - { url = "https://files.pythonhosted.org/packages/c4/81/e799bf5d419f422d8712108837c1d9bf6ebe3cb2a81ad94413449543a923/wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809", size = 79055 }, - { url = "https://files.pythonhosted.org/packages/62/62/30ca2405de6a20448ee557ab2cd61ab9c5900be7cbd18a2639db595f0b98/wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b", size = 87291 }, - { url = "https://files.pythonhosted.org/packages/49/4e/5d2f6d7b57fc9956bf06e944eb00463551f7d52fc73ca35cfc4c2cdb7aed/wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81", size = 90374 }, - { url = "https://files.pythonhosted.org/packages/a6/9b/c2c21b44ff5b9bf14a83252a8b973fb84923764ff63db3e6dfc3895cf2e0/wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9", size = 83896 }, - { url = "https://files.pythonhosted.org/packages/14/26/93a9fa02c6f257df54d7570dfe8011995138118d11939a4ecd82cb849613/wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c", size = 91738 }, 
- { url = "https://files.pythonhosted.org/packages/a2/5b/4660897233eb2c8c4de3dc7cefed114c61bacb3c28327e64150dc44ee2f6/wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc", size = 35568 }, - { url = "https://files.pythonhosted.org/packages/5c/cc/8297f9658506b224aa4bd71906447dea6bb0ba629861a758c28f67428b91/wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8", size = 37653 }, - { url = "https://files.pythonhosted.org/packages/70/cc/b92e1da2cad6a9f8ee481000ece07a35e3b24e041e60ff8b850c079f0ebf/wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2", size = 37314 }, - { url = "https://files.pythonhosted.org/packages/4a/cc/3402bcc897978be00fef608cd9e3e39ec8869c973feeb5e1e277670e5ad2/wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb", size = 38162 }, - { url = "https://files.pythonhosted.org/packages/28/d3/4f079f649c515727c127c987b2ec2e0816b80d95784f2d28d1a57d2a1029/wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8", size = 80235 }, - { url = "https://files.pythonhosted.org/packages/a3/1c/226c2a4932e578a2241dcb383f425995f80224b446f439c2e112eb51c3a6/wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c", size = 72553 }, - { url = "https://files.pythonhosted.org/packages/b1/e7/459a8a4f40f2fa65eb73cb3f339e6d152957932516d18d0e996c7ae2d7ae/wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a", size = 80129 }, - { url = 
"https://files.pythonhosted.org/packages/da/6f/6d0b3c4983f1fc764a422989dabc268ee87d937763246cd48aa92f1eed1e/wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664", size = 84550 }, - { url = "https://files.pythonhosted.org/packages/96/e8/27ef35cf61e5147c1c3abcb89cfbb8d691b2bb8364803fcc950140bc14d8/wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f", size = 77352 }, - { url = "https://files.pythonhosted.org/packages/b6/ad/7a0766341081bfd9f18a7049e4d6d45586ae5c5bb0a640f05e2f558e849c/wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537", size = 84626 }, - { url = "https://files.pythonhosted.org/packages/09/43/b26852e9c45a1aac0d14b1080b25b612fa840ba99739c5fc55db07b7ce08/wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3", size = 35327 }, - { url = "https://files.pythonhosted.org/packages/74/f2/96ed140b08743f7f68d5bda35a2a589600781366c3da96f056043d258b1a/wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35", size = 37526 }, - { url = "https://files.pythonhosted.org/packages/ff/21/abdedb4cdf6ff41ebf01a74087740a709e2edb146490e4d9beea054b0b7a/wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1", size = 23362 }, +version = "1.17.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/5a/d1/1daec934997e8b160040c78d7b31789f19b122110a75eca3d4e8da0049e1/wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984", size = 53307 }, + { url = "https://files.pythonhosted.org/packages/1b/7b/13369d42651b809389c1a7153baa01d9700430576c81a2f5c5e460df0ed9/wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22", size = 38486 }, + { url = "https://files.pythonhosted.org/packages/62/bf/e0105016f907c30b4bd9e377867c48c34dc9c6c0c104556c9c9126bd89ed/wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7", size = 38777 }, + { url = "https://files.pythonhosted.org/packages/27/70/0f6e0679845cbf8b165e027d43402a55494779295c4b08414097b258ac87/wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c", size = 83314 }, + { url = "https://files.pythonhosted.org/packages/0f/77/0576d841bf84af8579124a93d216f55d6f74374e4445264cb378a6ed33eb/wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72", size = 74947 }, + { url = "https://files.pythonhosted.org/packages/90/ec/00759565518f268ed707dcc40f7eeec38637d46b098a1f5143bff488fe97/wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061", size = 82778 }, + { url = "https://files.pythonhosted.org/packages/f8/5a/7cffd26b1c607b0b0c8a9ca9d75757ad7620c9c0a9b4a25d3f8a1480fafc/wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2", size = 81716 
}, + { url = "https://files.pythonhosted.org/packages/7e/09/dccf68fa98e862df7e6a60a61d43d644b7d095a5fc36dbb591bbd4a1c7b2/wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c", size = 74548 }, + { url = "https://files.pythonhosted.org/packages/b7/8e/067021fa3c8814952c5e228d916963c1115b983e21393289de15128e867e/wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62", size = 81334 }, + { url = "https://files.pythonhosted.org/packages/4b/0d/9d4b5219ae4393f718699ca1c05f5ebc0c40d076f7e65fd48f5f693294fb/wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563", size = 36427 }, + { url = "https://files.pythonhosted.org/packages/72/6a/c5a83e8f61aec1e1aeef939807602fb880e5872371e95df2137142f5c58e/wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f", size = 38774 }, + { url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308 }, + { url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488 }, + { url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776 }, + { url = 
"https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776 }, + { url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420 }, + { url = "https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199 }, + { url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307 }, + { url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025 }, + { url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879 }, + { url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419 }, + { url = 
"https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773 }, + { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799 }, + { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821 }, + { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919 }, + { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721 }, + { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899 }, + { url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222 }, + { url 
= "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707 }, + { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685 }, + { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 }, + { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 }, + { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 }, + { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 }, + { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 }, + { url = 
"https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 }, + { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 }, + { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 }, + { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 }, + { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 }, + { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 }, + { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 }, 
+ { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 }, + { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 }, + { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 }, + { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 }, + { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 }, + { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 }, + { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 }, + { url = 
"https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 }, + { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 }, + { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 }, + { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377 }, + { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 }, + { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 }, + { url = "https://files.pythonhosted.org/packages/8a/f4/6ed2b8f6f1c832933283974839b88ec7c983fd12905e01e97889dadf7559/wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a", size = 53308 }, + { url = 
"https://files.pythonhosted.org/packages/a2/a9/712a53f8f4f4545768ac532619f6e56d5d0364a87b2212531685e89aeef8/wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061", size = 38489 }, + { url = "https://files.pythonhosted.org/packages/fa/9b/e172c8f28a489a2888df18f953e2f6cb8d33b1a2e78c9dfc52d8bf6a5ead/wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82", size = 38776 }, + { url = "https://files.pythonhosted.org/packages/cf/cb/7a07b51762dcd59bdbe07aa97f87b3169766cadf240f48d1cbe70a1be9db/wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9", size = 83050 }, + { url = "https://files.pythonhosted.org/packages/a5/51/a42757dd41032afd6d8037617aa3bc6803ba971850733b24dfb7d5c627c4/wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f", size = 74718 }, + { url = "https://files.pythonhosted.org/packages/bf/bb/d552bfe47db02fcfc950fc563073a33500f8108efa5f7b41db2f83a59028/wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b", size = 82590 }, + { url = "https://files.pythonhosted.org/packages/77/99/77b06b3c3c410dbae411105bf22496facf03a5496bfaca8fbcf9da381889/wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f", size = 81462 }, + { url = "https://files.pythonhosted.org/packages/2d/21/cf0bd85ae66f92600829ea1de8e1da778e5e9f6e574ccbe74b66db0d95db/wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8", size = 74309 }, + { url = 
"https://files.pythonhosted.org/packages/6d/16/112d25e9092398a0dd6fec50ab7ac1b775a0c19b428f049785096067ada9/wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9", size = 81081 }, + { url = "https://files.pythonhosted.org/packages/2b/49/364a615a0cc0872685646c495c7172e4fc7bf1959e3b12a1807a03014e05/wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb", size = 36423 }, + { url = "https://files.pythonhosted.org/packages/00/ad/5d2c1b34ba3202cd833d9221833e74d6500ce66730974993a8dc9a94fb8c/wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb", size = 38772 }, + { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 }, ] [[package]] From 2100e463da9d44b3569803da65157c1db08d76c0 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:08:56 -0500 Subject: [PATCH 066/141] rename ext to integrations --- docs/source/examples/scripts/lightning_train.py | 2 +- src/torchrunx/ext/__init__.py | 1 - src/torchrunx/integrations/__init__.py | 1 + src/torchrunx/{ext => integrations}/lightning.py | 0 4 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 src/torchrunx/ext/__init__.py create mode 100644 src/torchrunx/integrations/__init__.py rename src/torchrunx/{ext => integrations}/lightning.py (100%) diff --git a/docs/source/examples/scripts/lightning_train.py b/docs/source/examples/scripts/lightning_train.py index 4da51f2a..8ec4e333 100644 --- a/docs/source/examples/scripts/lightning_train.py +++ b/docs/source/examples/scripts/lightning_train.py @@ -10,7 +10,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer import torchrunx -from torchrunx.ext.lightning import 
TorchrunxClusterEnvironment +from torchrunx.integrations.lightning import TorchrunxClusterEnvironment class GPT2CausalLMDataset(Dataset): def __init__(self, text_dataset): diff --git a/src/torchrunx/ext/__init__.py b/src/torchrunx/ext/__init__.py deleted file mode 100644 index 88896863..00000000 --- a/src/torchrunx/ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Extensions classes and functions.""" diff --git a/src/torchrunx/integrations/__init__.py b/src/torchrunx/integrations/__init__.py new file mode 100644 index 00000000..58cebc98 --- /dev/null +++ b/src/torchrunx/integrations/__init__.py @@ -0,0 +1 @@ +"""Utilities for integrations with other libraries.""" diff --git a/src/torchrunx/ext/lightning.py b/src/torchrunx/integrations/lightning.py similarity index 100% rename from src/torchrunx/ext/lightning.py rename to src/torchrunx/integrations/lightning.py From 90744e1766876fbd82a75d61be9ca5ec55ca6c79 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:11:53 -0500 Subject: [PATCH 067/141] rename to test-extras dep group --- pyproject.toml | 2 +- uv.lock | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index dc3a877d..6362c518 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,7 +31,7 @@ dependencies = [ ] [dependency-groups] dev = ["ruff", "pyright[nodejs]", "pytest", "build", "twine"] -dev-extras = ["submitit", "transformers"] +test-extras = ["submitit", "transformers"] docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-autodoc2==0.5.0", "sphinx-toolbox==3.8.1"] [tool.uv] diff --git a/uv.lock b/uv.lock index c9287d45..dd852a3a 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ name = "importlib-metadata" version = "8.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "zipp" }, + { name = "zipp", marker = "python_full_version < '3.12'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767 } wheels = [ @@ -2057,10 +2057,6 @@ dev = [ { name = "ruff" }, { name = "twine" }, ] -dev-extras = [ - { name = "submitit" }, - { name = "transformers" }, -] docs = [ { name = "furo" }, { name = "myst-parser" }, @@ -2068,6 +2064,10 @@ docs = [ { name = "sphinx-autodoc2" }, { name = "sphinx-toolbox" }, ] +test-extras = [ + { name = "submitit" }, + { name = "transformers" }, +] [package.metadata] requires-dist = [ @@ -2085,10 +2085,6 @@ dev = [ { name = "ruff" }, { name = "twine" }, ] -dev-extras = [ - { name = "submitit" }, - { name = "transformers" }, -] docs = [ { name = "furo", specifier = "==2024.8.6" }, { name = "myst-parser", specifier = "==3.0.1" }, @@ -2096,6 +2092,10 @@ docs = [ { name = "sphinx-autodoc2", specifier = "==0.5.0" }, { name = "sphinx-toolbox", specifier = "==3.8.1" }, ] +test-extras = [ + { name = "submitit" }, + { name = "transformers" }, +] [[package]] name = "tqdm" From 0db437bcd259afd23a25e8e90269cbf760bda026 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:18:04 -0500 Subject: [PATCH 068/141] testing docs build --- .github/workflows/docs.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 4269254a..18087ee8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -2,7 +2,7 @@ name: Build and publish docs on: push: - branches: [main] + branches: [update-docs] jobs: @@ -21,7 +21,7 @@ jobs: version: "0.5.0" python-version-file: ".python-version" enable-cache: true - - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - run: uv run --only-group docs python -m sphinx --builder html 
--doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html - uses: actions/configure-pages@v5 - uses: actions/upload-pages-artifact@v2 with: From 70b9074d09dc96731f5edb535d75fa629df0eebb Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:22:05 -0500 Subject: [PATCH 069/141] linting fixes --- src/torchrunx/__init__.py | 6 +++--- src/torchrunx/launcher.py | 2 +- src/torchrunx/utils/comm.py | 8 ++++---- src/torchrunx/utils/environment.py | 2 +- src/torchrunx/utils/errors.py | 2 +- src/torchrunx/utils/logging.py | 10 +++++----- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index 405a7715..f551c5b0 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -6,11 +6,11 @@ __all__ = [ "AgentFailedError", - "WorkerFailedError", - "Launcher", - "launch", "LaunchResult", + "Launcher", + "WorkerFailedError", "add_filter_to_handler", "file_handler", + "launch", "stream_handler", ] diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index e2d102ca..3db6d0ab 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -2,7 +2,7 @@ from __future__ import annotations -__all__ = ["Launcher", "launch", "LaunchResult"] +__all__ = ["LaunchResult", "Launcher", "launch"] import fnmatch import ipaddress diff --git a/src/torchrunx/utils/comm.py b/src/torchrunx/utils/comm.py index 4ed16f0a..e4733bfc 100644 --- a/src/torchrunx/utils/comm.py +++ b/src/torchrunx/utils/comm.py @@ -3,12 +3,12 @@ from __future__ import annotations __all__ = [ - "get_open_port", - "LauncherAgentGroup", - "LauncherPayload", "AgentPayload", - "ExceptionFromWorker", "AgentStatus", + "ExceptionFromWorker", + "LauncherAgentGroup", + "LauncherPayload", + "get_open_port", ] import datetime diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index c297f027..0df28e8c 100644 --- a/src/torchrunx/utils/environment.py +++ 
b/src/torchrunx/utils/environment.py @@ -2,7 +2,7 @@ from __future__ import annotations -__all__ = ["in_slurm_job", "slurm_hosts", "slurm_workers", "auto_hosts", "auto_workers"] +__all__ = ["auto_hosts", "auto_workers", "in_slurm_job", "slurm_hosts", "slurm_workers"] import os import subprocess diff --git a/src/torchrunx/utils/errors.py b/src/torchrunx/utils/errors.py index e6bb3d24..b211f007 100644 --- a/src/torchrunx/utils/errors.py +++ b/src/torchrunx/utils/errors.py @@ -4,8 +4,8 @@ __all__ = [ "AgentFailedError", - "WorkerFailedError", "ExceptionFromWorker", + "WorkerFailedError", ] diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index e8437816..6e7ea404 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -4,14 +4,14 @@ __all__ = [ "LoggingServerArgs", - "start_logging_server", - "redirect_stdio_to_logger", - "log_records_to_socket", "add_filter_to_handler", + "default_handlers", "file_handler", - "stream_handler", "file_handlers", - "default_handlers", + "log_records_to_socket", + "redirect_stdio_to_logger", + "start_logging_server", + "stream_handler", ] import datetime From 913162ed588e93da62623c0bd9bf1e3d2a4586b4 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:24:08 -0500 Subject: [PATCH 070/141] bump deps for docs build --- .github/workflows/docs.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 18087ee8..97ab25c8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -16,15 +16,15 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v3.2.2 + - uses: astral-sh/setup-uv@v5.2.2 with: - version: "0.5.0" + version: "0.5.29" python-version-file: ".python-version" enable-cache: true - run: uv run --only-group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs 
--show-traceback docs/source docs/_build/html - uses: actions/configure-pages@v5 - - uses: actions/upload-pages-artifact@v2 + - uses: actions/upload-pages-artifact@v3 with: path: docs/_build/html - id: deployment - uses: actions/deploy-pages@v3 + uses: actions/deploy-pages@v4 From 6e9fab734a34f2e002e54d62b8a4999648316cfd Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:44:16 -0500 Subject: [PATCH 071/141] update workflows, remove readthedocs settings --- .github/workflows/docs.yml | 30 ------------------------------ .github/workflows/main.yml | 9 ++++----- .github/workflows/release.yml | 27 ++++++++++++++++++++++++--- docs/.readthedocs.yaml | 11 ----------- 4 files changed, 28 insertions(+), 49 deletions(-) delete mode 100644 .github/workflows/docs.yml delete mode 100644 docs/.readthedocs.yaml diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 97ab25c8..00000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: Build and publish docs - -on: - push: - branches: [update-docs] - -jobs: - - publish-docs: - runs-on: ubuntu-latest - permissions: - pages: write - id-token: write - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - steps: - - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5.2.2 - with: - version: "0.5.29" - python-version-file: ".python-version" - enable-cache: true - - run: uv run --only-group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html - - uses: actions/configure-pages@v5 - - uses: actions/upload-pages-artifact@v3 - with: - path: docs/_build/html - - id: deployment - uses: actions/deploy-pages@v4 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6e06a3e1..6e026fa6 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -13,10 +13,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v4 - - uses: astral-sh/setup-uv@v3.2.2 + - uses: astral-sh/setup-uv@v5 with: - version: "0.5.0" - python-version-file: ".python-version" + version: "0.5.29" enable-cache: true - run: uv sync - run: uv run --frozen ruff check @@ -56,9 +55,9 @@ jobs: pytorch: ${{fromJson(needs.get-pytorch-versions.outputs.versions)}} steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v3.2.2 + - uses: astral-sh/setup-uv@v5 with: - version: "0.5.0" + version: "0.5.29" - if: contains('2.0,2.1,2.2', matrix.pytorch) run: echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV - run: uv run --python ${{ matrix.python }} --with torch~=${{ matrix.pytorch }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6922f831..199b0798 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,14 +5,35 @@ on: types: [published] jobs: - release: + release-to-pypi: runs-on: ubuntu-latest permissions: id-token: write steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v3 + - uses: astral-sh/setup-uv@v5 with: - version: "0.5.0" + version: "0.5.29" - run: uv build - run: uv publish + + publish-docs: + runs-on: ubuntu-latest + permissions: + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.5.29" + - run: uv run --only-group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - uses: actions/configure-pages@v5 + - uses: actions/upload-pages-artifact@v3 + with: + path: docs/_build/html + - id: deployment + uses: actions/deploy-pages@v4 diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml deleted file mode 100644 index d819dfbd..00000000 --- a/docs/.readthedocs.yaml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - 
-build: - os: ubuntu-24.04 - tools: - python: "3.9" - commands: - - asdf plugin add uv - - asdf install uv latest - - asdf global uv latest - - uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source $READTHEDOCS_OUTPUT/html From 0173aed282d2db522b7ca16565c1a3d7422a9676 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:48:03 -0500 Subject: [PATCH 072/141] fix docs deps --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 199b0798..17298d24 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -30,7 +30,7 @@ jobs: - uses: astral-sh/setup-uv@v5 with: version: "0.5.29" - - run: uv run --only-group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html - uses: actions/configure-pages@v5 - uses: actions/upload-pages-artifact@v3 with: From 14d62296e97ef0fed213754e36e7a652214e6863 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 7 Feb 2025 17:52:53 -0500 Subject: [PATCH 073/141] add publish-docs to main PR (temp) --- .github/workflows/main.yml | 21 +++++++++++++++++++++ docs/source/examples/transformers.md | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6e026fa6..f61b1a9a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -61,3 +61,24 @@ jobs: - if: contains('2.0,2.1,2.2', matrix.pytorch) run: echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV - run: uv run --python ${{ matrix.python }} --with torch~=${{ matrix.pytorch }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py + + 
publish-docs: + runs-on: ubuntu-latest + permissions: + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.5.29" + - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - uses: actions/configure-pages@v5 + - uses: actions/upload-pages-artifact@v3 + with: + path: docs/_build/html + - id: deployment + uses: actions/deploy-pages@v4 diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 3cafb8ef..73dd03a4 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -37,7 +37,7 @@ We don't need to pass `--launcher` arguments by default. But if you want to do m ### Script -[The [raw source code](https://torchrun.xyz/transformers_train.py) also specifies dependencies at the top of the file — in [PEP 723](https://peps.python.org/pep-0723) format — e.g. for `uv` as above.] +The [raw source code](https://torchrun.xyz/transformers_train.py) also specifies dependencies at the top of this file — in [PEP 723](https://peps.python.org/pep-0723) format — e.g. for `uv` as above. ```{eval-rst} .. 
literalinclude:: ./scripts/transformers_train.py From 58fb254a8ade5291dd006e3efbc82b03e11193be Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 00:20:41 -0500 Subject: [PATCH 074/141] add py.typed --- src/torchrunx/py.typed | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/torchrunx/py.typed diff --git a/src/torchrunx/py.typed b/src/torchrunx/py.typed new file mode 100644 index 00000000..e69de29b From 05ad2bf383f649c945c7d2e7c107a5d39bf6887a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 00:31:15 -0500 Subject: [PATCH 075/141] removed dependabot --- .github/dependabot.yml | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 0e1c39b9..00000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,8 +0,0 @@ -# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file - -version: 2 -updates: - - package-ecosystem: "pip" - directory: "/" - schedule: - interval: "daily" From a3c13df896c5995636b4e0a88046b41fa6cae71e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 00:31:23 -0500 Subject: [PATCH 076/141] update citation and contributing --- CITATION.cff | 4 ++-- CONTRIBUTING.md | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index 0ba3bbe0..6b520878 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -9,6 +9,6 @@ authors: family-names: Curtin email: peter_curtin@brown.edu repository-code: 'https://github.com/apoorvkh/torchrunx' -url: 'https://torchrunx.readthedocs.io' +url: 'https://torchrun.xyz' license: GPL-3.0 -year: 2024 +year: 2025 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 15839e0e..2769890b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,3 +13,7 @@ Make a pull request with your changes on Github and we'll try to look at it soon `tests/` contains 
`pytest`-style tests for validating that code changes do not break the core functionality of our library. At the moment, we run `pytest tests/test_ci.py` (i.e. simple single-node CPU-only tests) in our Github Actions CI pipeline (`.github/workflows/release.yml`). One can manually run our more involved tests (on GPUs, on multiple machines from SLURM) on their own hardware. + +## Documentation + +Our documentation is hosted on Github Pages and is updated with every package release. We build our documentation with `sphinx` using the command: `uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html`. The documentation will then be generated at `docs/_build/html`. From 564a9a5ef467977bccb174a3c8f192ee8108f74f Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 12:20:20 -0500 Subject: [PATCH 077/141] finished transformers example in docs --- .../examples/scripts/transformers_help.txt | 4 +- .../examples/scripts/transformers_train.py | 82 +++++++++++-------- docs/source/examples/transformers.md | 31 +++---- 3 files changed, 63 insertions(+), 54 deletions(-) diff --git a/docs/source/examples/scripts/transformers_help.txt b/docs/source/examples/scripts/transformers_help.txt index 6dd4323e..3678bc59 100644 --- a/docs/source/examples/scripts/transformers_help.txt +++ b/docs/source/examples/scripts/transformers_help.txt @@ -30,15 +30,13 @@ usage: transformers_train.py [-h] [OPTIONS] │ (required) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ dataset options ──────────────────────────────────────────────────────────╮ -│ --dataset.tokenizer-name STR │ -│ (required) │ │ --dataset.path STR │ │ (required) │ │ --dataset.name {None}|STR │ │ (default: None) │ │ --dataset.split {None}|STR │ │ (default: None) │ -│ --dataset.text-column-name STR │ +│ --dataset.text-column STR │ │ (default: text) │ │ --dataset.num-samples {None}|INT │ │ (default: 
None) │ diff --git a/docs/source/examples/scripts/transformers_train.py b/docs/source/examples/scripts/transformers_train.py index 387549c6..61ea120c 100644 --- a/docs/source/examples/scripts/transformers_train.py +++ b/docs/source/examples/scripts/transformers_train.py @@ -12,9 +12,9 @@ # [docs:start-after] import functools import os +from dataclasses import dataclass from typing import Annotated -import tyro from datasets import Dataset, load_dataset from transformers import ( AutoModelForCausalLM, @@ -22,23 +22,38 @@ PreTrainedModel, Trainer, TrainingArguments, + trainer_utils, ) - import torchrunx +import tyro + +@dataclass +class ModelConfig: + name: str -def build_model(name: str) -> PreTrainedModel: - return AutoModelForCausalLM.from_pretrained(name) + +@dataclass +class DatasetConfig: + path: str + name: str | None = None + split: str | None = None + text_column: str = "text" + num_samples: int | None = None def load_training_data( tokenizer_name: str, - path: str, - name: str | None = None, - split: str | None = None, - text_column_name: str = "text", - num_samples: int | None = None, + dataset_config: DatasetConfig, ) -> Dataset: + # Load dataset + + dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + if dataset_config.num_samples is not None: + dataset = dataset.select(range(dataset_config.num_samples)) + + # Build tokenizer + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) if tokenizer.pad_token is None: @@ -50,46 +65,47 @@ def load_training_data( padding="max_length", ) - dataset = load_dataset(path, name=name, split=split) + # Tokenize dataset - if num_samples is None: - num_samples = len(dataset) - - return ( - dataset.select(range(num_samples)) - .map( - tokenize_fn, - batched=True, - input_columns=[text_column_name], - remove_columns=[text_column_name], - ) - .map(lambda x: {"labels": x["input_ids"]}) - ) + return 
dataset.map( + tokenize_fn, + batched=True, + input_columns=[dataset_config.text_column], + remove_columns=[dataset_config.text_column], + ).map(lambda x: {"labels": x["input_ids"]}) def train( - model: PreTrainedModel, training_args: TrainingArguments, train_dataset: Dataset -) -> PreTrainedModel | None: + model: PreTrainedModel, + train_dataset: Dataset, + training_args: TrainingArguments, +) -> str: trainer = Trainer( model=model, - args=training_args, train_dataset=train_dataset, + args=training_args, ) + trainer.train() - # TODO: return checkpoint path - if int(os.environ["RANK"]) == 0: - return model + return trainer_utils.get_last_checkpoint(training_args.output_dir) def main( launcher: torchrunx.Launcher, - model: Annotated[PreTrainedModel, tyro.conf.arg(prefix_name=False, constructor=build_model)], - train_dataset: Annotated[Dataset, tyro.conf.arg(name="dataset", constructor=load_training_data)], + model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], + dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], training_args: Annotated[TrainingArguments, tyro.conf.arg(name="trainer", help="")], ): - results = launcher.run(train, (model, training_args, train_dataset)) - model = results.rank(0) + model = AutoModelForCausalLM.from_pretrained(model_config.name) + train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + + # Launch training + results = launcher.run(train, (model, train_dataset, training_args)) + + # Loading trained model from checkpoint + checkpoint_path = results.rank(0) + trained_model = AutoModelForCausalLM.from_pretrained(checkpoint_path) if __name__ == "__main__": diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 73dd03a4..be3bc65f 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -1,14 +1,8 @@ # Transformers -Here's an example script that uses `torchrunx` with 
[`transformers.Trainer`](https://huggingface.co/docs/transformers/en/main_classes/trainer) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes: [https://torchrun.xyz/transformers_train.py](https://torchrun.xyz/transformers_train.py). +Here's an example script that uses `torchrunx` with [`transformers.Trainer`](https://huggingface.co/docs/transformers/en/main_classes/trainer) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes. -You can pass command-line arguments to customize: - - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) - - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) - - `--dataset`: [`transformers.AutoTokenizer`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoTokenizer) and [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) - - `--trainer`: [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.TrainingArguments) - -The following arguments are required: `--model.name`, `--dataset.tokenizer-name`, `--dataset.path`, `--trainer.output-dir`. +[https://torchrun.xyz/transformers_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/transformers_train.py)

python transformers_train.py --help

(expand)
@@ -18,27 +12,28 @@ The following arguments are required: `--model.name`, `--dataset.tokenizer-name` ```
-Of course, this script is a template: you can also edit the script first, as desired. + - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) + - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) + - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) + - `--trainer`: [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.TrainingArguments) + +Required: `--model.name`, `--dataset.path`, `--trainer.output-dir` ### Training GPT-2 on WikiText in One Line -The following one-line command runs our script end-to-end (installing all dependencies, downloading model and data, training, logging to TensorBoard, etc.). - -Pre-requisites: [uv](https://docs.astral.sh/uv) +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash uv run https://torchrun.xyz/transformers_train.py \ - --model.name gpt2 --dataset.tokenizer-name gpt2 \ - --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ - --trainer.output_dir output --trainer.per-device-train-batch-size 4 --trainer.report-to tensorboard + --model.name gpt2 \ + --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ + --trainer.output_dir output --trainer.per-device-train-batch-size 4 --trainer.report-to tensorboard ``` -We don't need to pass `--launcher` arguments by default. But if you want to do multi-node training (and are not using SLURM), you can also pass e.g. `--launcher.hostnames node1 node2`. +For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. 
### Script -The [raw source code](https://torchrun.xyz/transformers_train.py) also specifies dependencies at the top of this file — in [PEP 723](https://peps.python.org/pep-0723) format — e.g. for `uv` as above. - ```{eval-rst} .. literalinclude:: ./scripts/transformers_train.py :start-after: # [docs:start-after] From 91a0868b0995010bcd3e54b184f9536e0b6e2a71 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 12:23:18 -0500 Subject: [PATCH 078/141] format script --- docs/source/examples/scripts/transformers_train.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/docs/source/examples/scripts/transformers_train.py b/docs/source/examples/scripts/transformers_train.py index 61ea120c..3048ba7a 100644 --- a/docs/source/examples/scripts/transformers_train.py +++ b/docs/source/examples/scripts/transformers_train.py @@ -80,14 +80,8 @@ def train( train_dataset: Dataset, training_args: TrainingArguments, ) -> str: - trainer = Trainer( - model=model, - train_dataset=train_dataset, - args=training_args, - ) - + trainer = Trainer(model=model, train_dataset=train_dataset, args=training_args) trainer.train() - return trainer_utils.get_last_checkpoint(training_args.output_dir) From b1896d3c7984e00870078a17957cda6ae02e324e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 12:34:03 -0500 Subject: [PATCH 079/141] edited lightning integration --- docs/source/examples/lightning.md | 2 +- src/torchrunx/integrations/lightning.py | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index b4d5d925..3c414b94 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -1,4 +1,4 @@ -# Pytorch Lightning +# PyTorch Lightning ```{eval-rst} .. 
literalinclude:: ./scripts/lightning_train.py diff --git a/src/torchrunx/integrations/lightning.py b/src/torchrunx/integrations/lightning.py index 9d008535..bc886add 100644 --- a/src/torchrunx/integrations/lightning.py +++ b/src/torchrunx/integrations/lightning.py @@ -1,15 +1,14 @@ -"""Pytorch Lightning extension utilities.""" +"""Integration with PyTorch Lightning Trainer.""" -import torch from lightning.fabric.plugins.environments.torchelastic import ( # pyright: ignore [reportMissingImports] TorchElasticEnvironment, ) class TorchrunxClusterEnvironment(TorchElasticEnvironment): - """PyTorch Lightning ClusterEnvironment compatible with torchrunx.""" + """Compatible ClusterEnvironment for PyTorch Lightning.""" @staticmethod def detect() -> bool: - """Returns ``True`` if the current process was launched using torchrunx.""" - return torch.distributed.is_available() + """Force use of the TorchElasticEnvironment.""" + return True From 65da00e53502dde0e19e49652ed82275593b893a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 12:34:47 -0500 Subject: [PATCH 080/141] changed name for utils.logging (ambiguous) --- pyproject.toml | 2 -- src/torchrunx/__init__.py | 2 +- src/torchrunx/agent.py | 2 +- src/torchrunx/launcher.py | 2 +- src/torchrunx/utils/{logging.py => logging_server.py} | 0 src/torchrunx/worker.py | 2 +- 6 files changed, 4 insertions(+), 6 deletions(-) rename src/torchrunx/utils/{logging.py => logging_server.py} (100%) diff --git a/pyproject.toml b/pyproject.toml index 6362c518..0e8cce1b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,8 +48,6 @@ src = ["src", "tests"] [tool.ruff.lint] select = ["ALL"] ignore = [ - "ANN101", - "ANN102", "ANN401", # self / cls / Any annotations "BLE001", # blind exceptions "TD", # todo syntax diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index f551c5b0..2590ef41 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -2,7 +2,7 @@ from .launcher import Launcher, 
LaunchResult, launch from .utils.errors import AgentFailedError, WorkerFailedError -from .utils.logging import add_filter_to_handler, file_handler, stream_handler +from .utils.logging_server import add_filter_to_handler, file_handler, stream_handler __all__ = [ "AgentFailedError", diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index d21dbf3a..811a9516 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -19,7 +19,7 @@ LauncherAgentGroup, get_open_port, ) -from .utils.logging import log_records_to_socket, redirect_stdio_to_logger +from .utils.logging_server import log_records_to_socket, redirect_stdio_to_logger from .worker import WorkerArgs, worker_entrypoint diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 3db6d0ab..ddd8a217 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -34,7 +34,7 @@ ExceptionFromWorker, WorkerFailedError, ) -from .utils.logging import LoggingServerArgs, start_logging_server +from .utils.logging_server import LoggingServerArgs, start_logging_server @dataclass diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging_server.py similarity index 100% rename from src/torchrunx/utils/logging.py rename to src/torchrunx/utils/logging_server.py diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index e7307520..897346a3 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -15,7 +15,7 @@ import torch.distributed as dist from .utils.errors import ExceptionFromWorker -from .utils.logging import log_records_to_socket, redirect_stdio_to_logger +from .utils.logging_server import log_records_to_socket, redirect_stdio_to_logger __all__ = ["WorkerArgs", "worker_entrypoint"] From cab42682195f72ac4e2a4af300c739dab3e72eaf Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 12:41:04 -0500 Subject: [PATCH 081/141] pin dev deps --- pyproject.toml | 2 +- uv.lock | 244 +------------------------------------------------ 2 files changed, 5 
insertions(+), 241 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0e8cce1b..6766a17e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ dependencies = [ "numpy>=1.20", ] [dependency-groups] -dev = ["ruff", "pyright[nodejs]", "pytest", "build", "twine"] +dev = ["ruff==0.9.5", "pyright[nodejs]==1.1.393", "pytest==8.3.4"] test-extras = ["submitit", "transformers"] docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-autodoc2==0.5.0", "sphinx-toolbox==3.8.1"] diff --git a/uv.lock b/uv.lock index dd852a3a..4277ed3c 100644 --- a/uv.lock +++ b/uv.lock @@ -77,15 +77,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537 }, ] -[[package]] -name = "backports-tarfile" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/86/72/cd9b395f25e290e633655a100af28cb253e4393396264a98bd5f5951d50f/backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991", size = 86406 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b9/fa/123043af240e49752f1c4bd24da5053b6bd00cad78c2be53c0d1e8b975bc/backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34", size = 30181 }, -] - [[package]] name = "bcrypt" version = "4.2.1" @@ -131,22 +122,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f9/49/6abb616eb3cbab6a7cca303dc02fdf3836de2e0b834bf966a7f5271a34d8/beautifulsoup4-4.13.3-py3-none-any.whl", hash = "sha256:99045d7d3f08f91f0d656bc9b7efbae189426cd913d830294a15eefa0ea4df16", size = 186015 }, ] -[[package]] -name = "build" -version = "1.2.2.post1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = 
"colorama", marker = "os_name == 'nt'" }, - { name = "importlib-metadata", marker = "python_full_version < '3.10.2'" }, - { name = "packaging" }, - { name = "pyproject-hooks" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/7d/46/aeab111f8e06793e4f0e421fcad593d547fb8313b50990f31681ee2fb1ad/build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7", size = 46701 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/84/c2/80633736cd183ee4a62107413def345f7e6e3c01563dbca1417363cf957e/build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5", size = 22950 }, -] - [[package]] name = "cachecontrol" version = "0.14.2" @@ -528,18 +503,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ea/da/6c2bea5327b640920267d3bf2c9fc114cfbd0a5de234d81cda80cc9e33c8/huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7", size = 464068 }, ] -[[package]] -name = "id" -version = "1.5.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/22/11/102da08f88412d875fa2f1a9a469ff7ad4c874b0ca6fed0048fe385bdb3d/id-1.5.0.tar.gz", hash = "sha256:292cb8a49eacbbdbce97244f47a97b4c62540169c976552e497fd57df0734c1d", size = 15237 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/cb/18326d2d89ad3b0dd143da971e77afd1e6ca6674f1b1c3df4b6bec6279fc/id-1.5.0-py3-none-any.whl", hash = "sha256:f1434e1cef91f2cbb8a4ec64663d5a23b9ed43ef44c4c957d02583d61714c658", size = 13611 }, -] - [[package]] name = "idna" version = "3.10" @@ -563,7 +526,7 @@ name = "importlib-metadata" version = "8.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "zipp", marker = "python_full_version < '3.12'" }, + { name = 
"zipp", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767 } wheels = [ @@ -588,51 +551,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0a/66/7f8c48009c72d73bc6bbe6eb87ac838d6a526146f7dab14af671121eb379/invoke-2.2.0-py3-none-any.whl", hash = "sha256:6ea924cc53d4f78e3d98bc436b08069a03077e6f85ad1ddaa8a116d7dad15820", size = 160274 }, ] -[[package]] -name = "jaraco-classes" -version = "3.4.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "more-itertools" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/06/c0/ed4a27bc5571b99e3cff68f8a9fa5b56ff7df1c2251cc715a652ddd26402/jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd", size = 11780 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/66/b15ce62552d84bbfcec9a4873ab79d993a1dd4edb922cbfccae192bd5b5f/jaraco.classes-3.4.0-py3-none-any.whl", hash = "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790", size = 6777 }, -] - -[[package]] -name = "jaraco-context" -version = "6.0.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "backports-tarfile", marker = "python_full_version < '3.12'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/df/ad/f3777b81bf0b6e7bc7514a1656d3e637b2e8e15fab2ce3235730b3e7a4e6/jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3", size = 13912 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/db/0c52c4cf5e4bd9f5d7135ec7669a3a767af21b3a308e1ed3674881e52b62/jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4", size = 
6825 }, -] - -[[package]] -name = "jaraco-functools" -version = "4.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "more-itertools" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ab/23/9894b3df5d0a6eb44611c36aec777823fc2e07740dabbd0b810e19594013/jaraco_functools-4.1.0.tar.gz", hash = "sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d", size = 19159 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/4f/24b319316142c44283d7540e76c7b5a6dbd5db623abd86bb7b3491c21018/jaraco.functools-4.1.0-py3-none-any.whl", hash = "sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649", size = 10187 }, -] - -[[package]] -name = "jeepney" -version = "0.8.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d6/f4/154cf374c2daf2020e05c3c6a03c91348d59b23c5366e968feb198306fdf/jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806", size = 106005 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/72/2a1e2290f1ab1e06f71f3d0f1646c9e4634e70e1d37491535e19266e8dc9/jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755", size = 48435 }, -] - [[package]] name = "jinja2" version = "3.1.5" @@ -645,24 +563,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, ] -[[package]] -name = "keyring" -version = "25.6.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "importlib-metadata", marker = "python_full_version < '3.12'" }, - { name = "jaraco-classes" }, - { name = "jaraco-context" }, - { name = "jaraco-functools" }, - { name = "jeepney", marker = "sys_platform == 'linux'" }, - { 
name = "pywin32-ctypes", marker = "sys_platform == 'win32'" }, - { name = "secretstorage", marker = "sys_platform == 'linux'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/70/09/d904a6e96f76ff214be59e7aa6ef7190008f52a0ab6689760a98de0bf37d/keyring-25.6.0.tar.gz", hash = "sha256:0b39998aa941431eb3d9b0d4b2460bc773b9df6fed7621c2dfb291a7e0187a66", size = 62750 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d3/32/da7f44bcb1105d3e88a0b74ebdca50c59121d2ddf71c9e34ba47df7f3a56/keyring-25.6.0-py3-none-any.whl", hash = "sha256:552a3f7af126ece7ed5c89753650eec89c7eaae8617d0aa4d9ad2b75111266bd", size = 39085 }, -] - [[package]] name = "markdown-it-py" version = "3.0.0" @@ -897,37 +797,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263 }, ] -[[package]] -name = "nh3" -version = "0.2.20" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/46/f2/eb781d94c7855e9129cbbdd3ab09a470441e4176a82a396ae1df270a7333/nh3-0.2.20.tar.gz", hash = "sha256:9705c42d7ff88a0bea546c82d7fe5e59135e3d3f057e485394f491248a1f8ed5", size = 17489 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/65/d31d93b6d1e5fe80d0cc18f0b96eaa561edfa0a15a6ef6b0fce50202a931/nh3-0.2.20-cp313-cp313t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e1061a4ab6681f6bdf72b110eea0c4e1379d57c9de937db3be4202f7ad6043db", size = 1202187 }, - { url = "https://files.pythonhosted.org/packages/b4/ae/5b03bf198e06921454012e4b9a51e676d26fd37d9fdc1f29371a0b380487/nh3-0.2.20-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb4254b1dac4a1ee49919a5b3f1caf9803ea8dada1816d9e8289e63d3cd0dd9a", size = 737822 }, - { url = 
"https://files.pythonhosted.org/packages/0a/53/a12dffb6ee3772deba82eb5997667fc835afd2e813d1f4080d8738f29eec/nh3-0.2.20-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ae9cbd713524cdb81e64663d0d6aae26f678db9f2cd9db0bf162606f1f9f20c", size = 756643 }, - { url = "https://files.pythonhosted.org/packages/d0/0c/6cd2c5ac3e6e31f2a28721e8e2a924cb6b05ad054bf787bd1816ffd40b96/nh3-0.2.20-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e1f7370b4e14cc03f5ae141ef30a1caf81fa5787711f80be9081418dd9eb79d2", size = 923415 }, - { url = "https://files.pythonhosted.org/packages/64/f0/229a6c8b81b86ba22d8e7f27ade62cb2fcfb987e570f49944fdd8490a76a/nh3-0.2.20-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:ac4d27dc836a476efffc6eb661994426b8b805c951b29c9cf2ff36bc9ad58bc5", size = 994959 }, - { url = "https://files.pythonhosted.org/packages/75/e3/62ae3d3b658739ee15b129356fe6d4c4bc8ab235d7bf2e0d2794d64f7bc6/nh3-0.2.20-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4fd2e9248725ebcedac3997a8d3da0d90a12a28c9179c6ba51f1658938ac30d0", size = 915777 }, - { url = "https://files.pythonhosted.org/packages/45/bd/8405d03371e335f02eb72e09dcf73307f8fd3095e4165cec6836346fe3db/nh3-0.2.20-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f7d564871833ddbe54df3aa59053b1110729d3a800cb7628ae8f42adb3d75208", size = 908614 }, - { url = "https://files.pythonhosted.org/packages/ee/f8/5d977f09cf82c1f22a864375f471db111530fc79c88efdf0659fe6d3d6bc/nh3-0.2.20-cp313-cp313t-win32.whl", hash = "sha256:d2a176fd4306b6f0f178a3f67fac91bd97a3a8d8fafb771c9b9ef675ba5c8886", size = 540482 }, - { url = "https://files.pythonhosted.org/packages/c5/f4/e34afe5fd8bed1920eac2974c9c853f548b4b65c139444285ffd2a68495d/nh3-0.2.20-cp313-cp313t-win_amd64.whl", hash = "sha256:6ed834c68452a600f517dd3e1534dbfaff1f67f98899fecf139a055a25d99150", size = 541302 }, - { url = 
"https://files.pythonhosted.org/packages/92/08/5e3b61eed1bc0efeb330ddc5cf5194f28a0b7be7943aa20bd44cfe14650b/nh3-0.2.20-cp38-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:76e2f603b30c02ff6456b233a83fc377dedab6a50947b04e960a6b905637b776", size = 1202141 }, - { url = "https://files.pythonhosted.org/packages/29/d2/3377f8006c71e95e007b07b5bfcac22c9de4744ca3efb23b396d3deb9581/nh3-0.2.20-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:181063c581defe683bd4bb78188ac9936d208aebbc74c7f7c16b6a32ae2ebb38", size = 760699 }, - { url = "https://files.pythonhosted.org/packages/37/d7/7077f925d7d680d53dcb6e18a4af13d1a7da59761c06c193bfa249a7470a/nh3-0.2.20-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:231addb7643c952cd6d71f1c8702d703f8fe34afcb20becb3efb319a501a12d7", size = 747353 }, - { url = "https://files.pythonhosted.org/packages/cb/59/6b2f32af477aae81f1454a7f6ef490ebc3c22dd9e1370e73fcfe243dc07a/nh3-0.2.20-cp38-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:1b9a8340a0aab991c68a5ca938d35ef4a8a3f4bf1b455da8855a40bee1fa0ace", size = 854125 }, - { url = "https://files.pythonhosted.org/packages/5b/f2/c3d2f7b801477b8b387b51fbefd16dc7ade888aeac547f18ba0558fd6f48/nh3-0.2.20-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10317cd96fe4bbd4eb6b95f3920b71c902157ad44fed103fdcde43e3b8ee8be6", size = 817453 }, - { url = "https://files.pythonhosted.org/packages/42/4d/f7e3a35506a0eba6eedafc21ad52773985511eb838812e9f96354831ad3c/nh3-0.2.20-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8698db4c04b140800d1a1cd3067fda399e36e1e2b8fc1fe04292a907350a3e9b", size = 891694 }, - { url = "https://files.pythonhosted.org/packages/e6/0e/c499453c296fb40366e3069cd68fde77a10f0a30a17b9d3b491eb3ebc5bf/nh3-0.2.20-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3eb04b9c3deb13c3a375ea39fd4a3c00d1f92e8fb2349f25f1e3e4506751774b", size = 744388 }, - { url = "https://files.pythonhosted.org/packages/18/67/c3de8022ba2719bdbbdd3704d1e32dbc7d3f8ac8646247711645fc90d051/nh3-0.2.20-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92f3f1c4f47a2c6f3ca7317b1d5ced05bd29556a75d3a4e2715652ae9d15c05d", size = 764831 }, - { url = "https://files.pythonhosted.org/packages/f0/14/a4ea40e2439717d11c3104fc2dc0ac412301b7aeb81d6a3d0e6505c77e7d/nh3-0.2.20-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ddefa9fd6794a87e37d05827d299d4b53a3ec6f23258101907b96029bfef138a", size = 923334 }, - { url = "https://files.pythonhosted.org/packages/ed/ae/e8ee8afaf67903dd304f390056d1ea620327524e2ad66127a331b14d5d98/nh3-0.2.20-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ce3731c8f217685d33d9268362e5b4f770914e922bba94d368ab244a59a6c397", size = 994873 }, - { url = "https://files.pythonhosted.org/packages/20/b5/02122cfe3b36cf0ba0fcd73a04fd462e1f7a9d91b456f6e0b70e46df21c7/nh3-0.2.20-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:09f037c02fc2c43b211ff1523de32801dcfb0918648d8e651c36ef890f1731ec", size = 915707 }, - { url = "https://files.pythonhosted.org/packages/47/d3/5df43cc3570cdc9eb1dc79a39191f89fedf8bcefd8d30a161ff1dffb146c/nh3-0.2.20-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:813f1c8012dd64c990514b795508abb90789334f76a561fa0fd4ca32d2275330", size = 908539 }, - { url = "https://files.pythonhosted.org/packages/4f/fd/aa000f6c76a832c488eac26f20d2e8a221ba2b965efce692f14ebc4290bf/nh3-0.2.20-cp38-abi3-win32.whl", hash = "sha256:47b2946c0e13057855209daeffb45dc910bd0c55daf10190bb0b4b60e2999784", size = 540439 }, - { url = "https://files.pythonhosted.org/packages/19/31/d65594efd3b42b1de2335d576eb77525691fc320dbf8617948ee05c008e5/nh3-0.2.20-cp38-abi3-win_amd64.whl", hash = "sha256:da87573f03084edae8eb87cfe811ec338606288f81d333c07d2a9a0b9b976c0b", size = 541249 }, -] - [[package]] name = "nodeenv" version = "1.9.1" @@ -1272,15 
+1141,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/22/d3db169895faaf3e2eda892f005f433a62db2decbcfbc2f61e6517adfa87/PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93", size = 212141 }, ] -[[package]] -name = "pyproject-hooks" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/82/28175b2414effca1cdac8dc99f76d660e7a4fb0ceefa4b4ab8f5f6742925/pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8", size = 19228 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913", size = 10216 }, -] - [[package]] name = "pyright" version = "1.1.393" @@ -1316,15 +1176,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, ] -[[package]] -name = "pywin32-ctypes" -version = "0.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/85/9f/01a1a99704853cb63f253eea009390c88e7131c67e66a0a02099a8c917cb/pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755", size = 29471 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/de/3d/8161f7711c017e01ac9f008dfddd9410dff3674334c233bde66e7ba65bbf/pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8", size = 30756 }, -] - [[package]] name = "pyyaml" version = "6.0.2" @@ -1378,20 +1229,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312 }, ] -[[package]] -name = "readme-renderer" -version = "44.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "docutils" }, - { name = "nh3" }, - { name = "pygments" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310 }, -] - [[package]] name = "regex" version = "2024.11.6" @@ -1492,41 +1329,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, ] -[[package]] -name = "requests-toolbelt" -version = "1.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481 }, -] - 
-[[package]] -name = "rfc3986" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/85/40/1520d68bfa07ab5a6f065a186815fb6610c86fe957bc065754e47f7b0840/rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c", size = 49026 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/9a/9afaade874b2fa6c752c36f1548f718b5b83af81ed9b76628329dab81c1b/rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd", size = 31326 }, -] - -[[package]] -name = "rich" -version = "13.9.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown-it-py" }, - { name = "pygments" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 }, -] - [[package]] name = "ruamel-yaml" version = "0.18.10" @@ -1639,19 +1441,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/86/ca/aa489392ec6fb59223ffce825461e1f811a3affd417121a2088be7a5758b/safetensors-0.5.2-cp38-abi3-win_amd64.whl", hash = "sha256:78abdddd03a406646107f973c7843276e7b64e5e32623529dc17f3d94a20f589", size = 303756 }, ] -[[package]] -name = "secretstorage" -version = "3.3.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "jeepney" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/53/a4/f48c9d79cb507ed1373477dbceaba7401fd8a23af63b837fa61f1dcd3691/SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77", size = 19739 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/54/24/b4293291fa1dd830f353d2cb163295742fa87f179fcc8a20a306a81978b7/SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99", size = 15221 }, -] - [[package]] name = "setuptools" version = "75.8.0" @@ -2051,11 +1840,9 @@ dependencies = [ [package.dev-dependencies] dev = [ - { name = "build" }, { name = "pyright", extra = ["nodejs"] }, { name = "pytest" }, { name = "ruff" }, - { name = "twine" }, ] docs = [ { name = "furo" }, @@ -2079,11 +1866,9 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ - { name = "build" }, - { name = "pyright", extras = ["nodejs"] }, - { name = "pytest" }, - { name = "ruff" }, - { name = "twine" }, + { name = "pyright", extras = ["nodejs"], specifier = "==1.1.393" }, + { name = "pytest", specifier = "==8.3.4" }, + { name = "ruff", specifier = "==0.9.5" }, ] docs = [ { name = "furo", specifier = "==2024.8.6" }, @@ -2143,27 +1928,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bc/74/9f12bdedeb110242d8bb1bd621f6605e753ee0cbf73cf7f3a62b8173f190/triton-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ceed0eff2c4a73b14eb63e052992f44bbdf175f3fad21e1ac8097a772de7ee", size = 253057866 }, ] -[[package]] -name = "twine" -version = "6.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "id" }, - { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, - { name = "keyring", marker = "platform_machine != 'ppc64le' and platform_machine != 's390x'" }, - { name = "packaging" }, - { name = "readme-renderer" }, - { name = "requests" }, - { name = "requests-toolbelt" }, - { name = "rfc3986" }, - { 
name = "rich" }, - { name = "urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c8/a2/6df94fc5c8e2170d21d7134a565c3a8fb84f9797c1dd65a5976aaf714418/twine-6.1.0.tar.gz", hash = "sha256:be324f6272eff91d07ee93f251edf232fc647935dd585ac003539b42404a8dbd", size = 168404 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/b6/74e927715a285743351233f33ea3c684528a0d374d2e43ff9ce9585b73fe/twine-6.1.0-py3-none-any.whl", hash = "sha256:a47f973caf122930bf0fbbf17f80b83bc1602c9ce393c7845f289a3001dc5384", size = 40791 }, -] - [[package]] name = "typing-extensions" version = "4.12.2" From 777541d3dfad6a8454e090d1e275a19b37f7917a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 12:58:02 -0500 Subject: [PATCH 082/141] updated pyproject --- README.md | 2 +- pyproject.toml | 16 ++-------------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 4a7addac..74252252 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # torchrunx 🔥 -[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/torchrunx)](https://github.com/apoorvkh/torchrunx/blob/main/pyproject.toml) +[![Python Version](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2Fapoorvkh%2Ftorchrunx%2Fmain%2Fpyproject.toml)](https://github.com/apoorvkh/torchrunx/blob/main/pyproject.toml) [![PyTorch Version](https://img.shields.io/badge/torch-%3E%3D2.0-orange)](https://github.com/pytorch/pytorch) [![PyPI - Version](https://img.shields.io/pypi/v/torchrunx)](https://pypi.org/project/torchrunx/) ![Tests](https://img.shields.io/github/actions/workflow/status/apoorvkh/torchrunx/.github%2Fworkflows%2Fmain.yml) diff --git a/pyproject.toml b/pyproject.toml index 6766a17e..58b2399a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,21 +12,14 @@ authors = [ description = "Automatically initialize distributed PyTorch environments" readme = "README.md" license = {file = "LICENSE"} -urls = { 
Repository = "https://github.com/apoorvkh/torchrunx.git", Documentation = "https://torchrunx.readthedocs.io" } -classifiers = [ - "Programming Language :: Python", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", -] +urls = { Repository = "https://github.com/apoorvkh/torchrunx.git", Documentation = "https://torchrun.xyz" } requires-python = ">=3.9" dependencies = [ "cloudpickle>=3.0", "fabric>=3.2", "torch>=2.0", # torch.distributed depends on numpy - # note: torch<=2.2 needs numpy<2 + # torch<=2.2 needs numpy<2 "numpy>=1.20", ] [dependency-groups] @@ -34,11 +27,6 @@ dev = ["ruff==0.9.5", "pyright[nodejs]==1.1.393", "pytest==8.3.4"] test-extras = ["submitit", "transformers"] docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-autodoc2==0.5.0", "sphinx-toolbox==3.8.1"] -[tool.uv] -managed = true -python-preference = "only-managed" - -## Development tools [tool.ruff] include = ["pyproject.toml", "src/**/*.py", "tests/**/*.py"] From 7e032251c5ac2a3cf853b53c7645ca4ff858eee7 Mon Sep 17 00:00:00 2001 From: "peter_curtin@brown.edu" Date: Sat, 8 Feb 2025 13:34:50 -0500 Subject: [PATCH 083/141] refactor accelerate train script --- .../examples/scripts/accelerate_train.py | 128 +++++++++++------- 1 file changed, 79 insertions(+), 49 deletions(-) diff --git a/docs/source/examples/scripts/accelerate_train.py b/docs/source/examples/scripts/accelerate_train.py index 6cb08937..60047b67 100644 --- a/docs/source/examples/scripts/accelerate_train.py +++ b/docs/source/examples/scripts/accelerate_train.py @@ -7,85 +7,115 @@ # "torch", # "torchrunx", # "transformers", +# "tyro", # ] # /// -from pathlib import Path +import functools +import os +from dataclasses import dataclass +from typing import Annotated import torch from accelerate import Accelerator from datasets import load_dataset -from torch import nn from torch.utils.data import 
Dataset -from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers import AutoModelForCausalLM, PreTrainedModel, AutoTokenizer import torchrunx +import tyro + +@dataclass +class ModelConfig: + name: str + + +@dataclass +class DatasetConfig: + path: str + name: str | None = None + split: str | None = None + text_column: str = "text" + num_samples: int | None = None + +def load_training_data( + tokenizer_name: str, + dataset_config: DatasetConfig, +) -> Dataset: + # Load dataset + + dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + if dataset_config.num_samples is not None: + dataset = dataset.select(range(dataset_config.num_samples)) + + # Build tokenizer + + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + tokenize_fn = functools.partial( + tokenizer, + max_length=tokenizer.model_max_length, + truncation=True, + padding="max_length", + ) + # Tokenize dataset -class GPT2CausalLMDataset(Dataset): - def __init__(self, text_dataset): - self.dataset = text_dataset - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - self.tokenizer.pad_token = self.tokenizer.eos_token - self.max_length = 1024 - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - encoded = self.tokenizer( - self.dataset[idx]["text"], - max_length=self.max_length, - truncation=True, - padding="max_length", - return_tensors="pt", - ) - - input_ids = encoded.input_ids.squeeze() - attention_mask = encoded.attention_mask.squeeze() - labels = input_ids.clone() + return dataset.map( + tokenize_fn, + batched=True, + input_columns=[dataset_config.text_column], + remove_columns=[dataset_config.text_column], + ).map(lambda x: {"labels": x["input_ids"]}) - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "labels": labels, - } 
+def train( + model: PreTrainedModel, + train_dataset: Dataset, +) -> str: -def train(): accelerator = Accelerator() - model = AutoModelForCausalLM.from_pretrained("gpt2") optimizer = torch.optim.Adam(model.parameters()) - wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") - train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) + train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=8) - loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) - model, optimizer, loader = accelerator.prepare(model, optimizer, loader) + model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader) model.train() - for batch_idx, batch in enumerate(loader): + for batch_idx, batch in enumerate(train_dataloader): if batch_idx == 10: break - print(f"Step {batch_idx}") - device_batch = {k: v.to(accelerator.device) for k, v in batch.items()} + device_batch = {k: torch.stack(v, dim=0).to(accelerator.device) for k, v in batch.items()} optimizer.zero_grad() loss = model(**device_batch).loss + print(f"Step {batch_idx}, loss: {loss.item()}", flush=True, end="") accelerator.backward(loss) optimizer.step() - return model + accelerator.unwrap_model(model).save_pretrained("output/") + return "output/" -if __name__ == "__main__": - Path("output").mkdir(exist_ok=True) - results = torchrunx.launch( - func=train, - hostnames=["localhost"], - workers_per_host=1, - ) +def main( + launcher: torchrunx.Launcher, + model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], + dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], + # training_args: Annotated[TrainingArguments, tyro.conf.arg(name="trainer", help="")], +): + model = AutoModelForCausalLM.from_pretrained(model_config.name) + train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + + # Launch training + results = launcher.run(train, (model, train_dataset)) + + # Loading 
trained model from checkpoint + checkpoint_path = results.rank(0) + trained_model = AutoModelForCausalLM.from_pretrained(checkpoint_path) - trained_model: nn.Module = results.rank(0) - torch.save(trained_model.state_dict(), "output/model.pth") + +if __name__ == "__main__": + tyro.cli(main) From d5728e95c1997ff873b23b6bd202eed627ba68dd Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 21:24:26 -0500 Subject: [PATCH 084/141] update readme and tests --- .github/workflows/main.yml | 51 ++++++++++++++++++++++---------------- README.md | 4 +-- 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f61b1a9a..f475e6fa 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -27,40 +27,47 @@ jobs: ## - get-pytorch-versions: + get-python-pytorch-versions: runs-on: ubuntu-latest outputs: - versions: ${{ steps.get-pytorch-versions.outputs.versions }} + versions: ${{ steps.get-versions.outputs.versions }} steps: - - name: Get PyTorch versions - id: get-pytorch-versions + - name: "Get (Python, PyTorch) versions" + id: get-versions run: | - VERSIONS=$( - curl -s https://pypi.org/pypi/torch/json | jq -r '.releases | keys[]' | - # remove versions <2.0; strip "patch" from versions - grep -v '^1\.' | grep -E '\.[0]+$' | sort -V | sed 's/\.0$//' | - # to JSON array - jq -R . | jq -sc . + pytorch_versions=$( + curl -s https://pypi.org/pypi/torch/json | jq -r '.releases | keys[]' | + # remove versions <2.0; strip "patch" from versions + grep -v '^1\.' | grep -E '\.[0]+$' | sort -V | sed 's/\.0$//' ) - echo "versions=$VERSIONS" >> $GITHUB_OUTPUT - # e.g. 
["2.0","2.1","2.2","2.3","2.4"] + version_matrix=() + for pytorch_version in $pytorch_versions; do + cp_versions=$(curl -s "https://pypi.org/pypi/torch/$pytorch_version/json" | jq -r '.urls[].filename | select(test("manylinux1_x86_64")) | capture("(?cp[0-9]+)-") | .cp') + for cp_version in $cp_versions; do + python_version=$(echo $cp_version | sed -E 's/cp([0-9])([0-9]+)/\1.\2/') + version_matrix+=($python_version,$pytorch_version) + done + done + version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s .) + echo "versions=$version_matrix" >> $GITHUB_OUTPUT + # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... ] test: runs-on: ubuntu-latest - needs: get-pytorch-versions + needs: get-python-pytorch-versions strategy: fail-fast: false matrix: - python: ["3.9", "3.10", "3.11", "3.12"] - pytorch: ${{fromJson(needs.get-pytorch-versions.outputs.versions)}} + versions: ${{fromJson(needs.get-python-pytorch-versions.outputs.versions)}} steps: - - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 - with: - version: "0.5.29" - - if: contains('2.0,2.1,2.2', matrix.pytorch) - run: echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV - - run: uv run --python ${{ matrix.python }} --with torch~=${{ matrix.pytorch }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py + - run: echo ${{ matrix.versions }} + # - uses: actions/checkout@v4 + # - uses: astral-sh/setup-uv@v5 + # with: + # version: "0.5.29" + # - if: contains('2.0,2.1,2.2', matrix.pytorch) + # run: echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV + # - run: uv run --python ${{ matrix.python }} --with torch~=${{ matrix.pytorch }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py publish-docs: runs-on: ubuntu-latest diff --git a/README.md b/README.md index 74252252..27e373b5 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,8 @@ [![Python 
Version](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2Fapoorvkh%2Ftorchrunx%2Fmain%2Fpyproject.toml)](https://github.com/apoorvkh/torchrunx/blob/main/pyproject.toml) [![PyTorch Version](https://img.shields.io/badge/torch-%3E%3D2.0-orange)](https://github.com/pytorch/pytorch) [![PyPI - Version](https://img.shields.io/pypi/v/torchrunx)](https://pypi.org/project/torchrunx/) +[![Documentation](https://img.shields.io/badge/Documentation-blue)](https://torchrun.xyz) ![Tests](https://img.shields.io/github/actions/workflow/status/apoorvkh/torchrunx/.github%2Fworkflows%2Fmain.yml) -[![Docs](https://readthedocs.org/projects/torchrunx/badge/?version=stable)](https://torchrunx.readthedocs.io) [![GitHub License](https://img.shields.io/github/license/apoorvkh/torchrunx)](https://github.com/apoorvkh/torchrunx/blob/main/LICENSE) By [Apoorv Khandelwal](https://apoorvkh.com) and [Peter Curtin](https://github.com/pmcurtin) @@ -106,7 +106,7 @@ torch.save(trained_model.state_dict(), "output/model.pth") 4. **Better handling of system failures. No more zombies!** 🧟 -> With `torchrun`, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for at least 10 minutes (the NCCL timeout) or become perpetual zombies. +> With `torchrun`, your "work" is inherently coupled to your main Python process. If the system kills one of your workers (e.g. due to RAM OOM or segmentation faults), there is no way to fail gracefully in Python. Your processes might hang for 10 minutes (the NCCL timeout) or become perpetual zombies. > > > `torchrunx` decouples "launcher" and "worker" processes. If the system kills a worker, our launcher immediately raises a `WorkerFailure` exception, which users can handle as they wish. We always clean up all nodes, so no more zombies! 
From c76e83569b0e0c622ba7e40e31f3254a58ec15b0 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 21:25:56 -0500 Subject: [PATCH 085/141] test echo workflow --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f475e6fa..1fd4686a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -49,7 +49,7 @@ jobs: done done version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s .) - echo "versions=$version_matrix" >> $GITHUB_OUTPUT + echo "versions=$version_matrix" # >> $GITHUB_OUTPUT # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... ] test: From 4a268e204656da8819320ad0ad6f10a7b775324c Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 21:28:55 -0500 Subject: [PATCH 086/141] bump workflow --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1fd4686a..397bef36 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -48,7 +48,7 @@ jobs: version_matrix+=($python_version,$pytorch_version) done done - version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s .) + version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s -c .) echo "versions=$version_matrix" # >> $GITHUB_OUTPUT # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... ] From 8c6f407912a5f59e057ebb75e68be797ae9edf9d Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 21:31:04 -0500 Subject: [PATCH 087/141] bump workflow again --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 397bef36..4a4c1640 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -49,7 +49,7 @@ jobs: done done version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s -c .) 
- echo "versions=$version_matrix" # >> $GITHUB_OUTPUT + echo "versions=$version_matrix" >> $GITHUB_OUTPUT # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... ] test: From 27720bf2330366e894979998ea6e636d3e57b9d8 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 22:01:07 -0500 Subject: [PATCH 088/141] bump testing workflow --- .github/workflows/main.yml | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4a4c1640..ad59dde4 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -35,11 +35,18 @@ jobs: - name: "Get (Python, PyTorch) versions" id: get-versions run: | + # Get PyTorch versions (>=2.0) from PyPI + pytorch_versions=$( curl -s https://pypi.org/pypi/torch/json | jq -r '.releases | keys[]' | # remove versions <2.0; strip "patch" from versions grep -v '^1\.' | grep -E '\.[0]+$' | sort -V | sed 's/\.0$//' ) + + # For each PyTorch version, get Python versions that have builds + # Generate JSON list of "python,pytorch" versions + # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... ] + version_matrix=() for pytorch_version in $pytorch_versions; do cp_versions=$(curl -s "https://pypi.org/pypi/torch/$pytorch_version/json" | jq -r '.urls[].filename | select(test("manylinux1_x86_64")) | capture("(?cp[0-9]+)-") | .cp') @@ -49,8 +56,9 @@ jobs: done done version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s -c .) + + # Write to outputs echo "versions=$version_matrix" >> $GITHUB_OUTPUT - # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... 
] test: runs-on: ubuntu-latest @@ -60,14 +68,18 @@ jobs: matrix: versions: ${{fromJson(needs.get-python-pytorch-versions.outputs.versions)}} steps: - - run: echo ${{ matrix.versions }} - # - uses: actions/checkout@v4 - # - uses: astral-sh/setup-uv@v5 - # with: - # version: "0.5.29" - # - if: contains('2.0,2.1,2.2', matrix.pytorch) - # run: echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV - # - run: uv run --python ${{ matrix.python }} --with torch~=${{ matrix.pytorch }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py + - run: | + IFS=',' read -r python_version pytorch_version <<< ${{ matrix.versions }} + echo "PYTHON_VERSION=$python_version" >> $GITHUB_ENV + echo "PYTORCH_VERSION=$pytorch_version" >> $GITHUB_ENV + if [[ "$pytorch_version" =~ ^2\.(0|1|2)$ ]]; then + echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV + fi + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.5.29" + - run: uv run --python ${{ env.PYTHON_VERSION }} --with torch~=${{ env.PYTORCH_VERSION }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py publish-docs: runs-on: ubuntu-latest From f2049c59c498f87573b28f32624d0e26c7fc122c Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 23:03:43 -0500 Subject: [PATCH 089/141] final update for testing script --- .github/workflows/main.yml | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ad59dde4..f2dd43bf 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -35,23 +35,34 @@ jobs: - name: "Get (Python, PyTorch) versions" id: get-versions run: | - # Get PyTorch versions (>=2.0) from PyPI + MIN_PYTHON_VERSION=3.9 + MIN_PYTORCH_VERSION=2.0 + # Get PyTorch versions from PyPI pytorch_versions=$( curl -s https://pypi.org/pypi/torch/json | jq -r '.releases | keys[]' | - # remove versions <2.0; strip "patch" from versions - grep -v '^1\.' 
| grep -E '\.[0]+$' | sort -V | sed 's/\.0$//' + # strip "patch" from versions + grep -E '\.[0]+$' | sort -V | sed 's/\.0$//' ) # For each PyTorch version, get Python versions that have builds # Generate JSON list of "python,pytorch" versions - # e.g. [ "3.10,2.0", "3.11,2.0", "3.8,2.0", ... ] version_matrix=() for pytorch_version in $pytorch_versions; do - cp_versions=$(curl -s "https://pypi.org/pypi/torch/$pytorch_version/json" | jq -r '.urls[].filename | select(test("manylinux1_x86_64")) | capture("(?cp[0-9]+)-") | .cp') - for cp_version in $cp_versions; do - python_version=$(echo $cp_version | sed -E 's/cp([0-9])([0-9]+)/\1.\2/') + # Skip if PyTorch version less than minium + if [[ "$(printf '%s\n' "$pytorch_version" "$MIN_PYTORCH_VERSION" | sort -V | head -n 1)" != "$MIN_PYTORCH_VERSION" ]]; then continue; fi + + python_versions=$( + curl -s "https://pypi.org/pypi/torch/$pytorch_version/json" | + jq -r '.urls[].filename | select(test("manylinux1_x86_64")) | capture("(?cp[0-9]+)-") | .cp | + sub("cp(?[0-9])(?[0-9]+)"; "\(.major).\(.minor)")' + ) + + for python_version in $python_versions; do + # Skip if Python version less than minium + if [[ "$(printf '%s\n' "$python_version" "$MIN_PYTHON_VERSION" | sort -V | head -n 1)" != "$MIN_PYTHON_VERSION" ]]; then continue; fi + version_matrix+=($python_version,$pytorch_version) done done From 097c268df6fbea65183e9e58a484906abdf81e70 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 23:06:31 -0500 Subject: [PATCH 090/141] spacing in (python, pytorch) version --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f2dd43bf..e38975cc 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -63,7 +63,7 @@ jobs: # Skip if Python version less than minium if [[ "$(printf '%s\n' "$python_version" "$MIN_PYTHON_VERSION" | sort -V | head -n 1)" != "$MIN_PYTHON_VERSION" ]]; then continue; fi - 
version_matrix+=($python_version,$pytorch_version) + version_matrix+=($python_version, $pytorch_version) done done version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s -c .) @@ -80,7 +80,7 @@ jobs: versions: ${{fromJson(needs.get-python-pytorch-versions.outputs.versions)}} steps: - run: | - IFS=',' read -r python_version pytorch_version <<< ${{ matrix.versions }} + IFS=', ' read -r python_version pytorch_version <<< ${{ matrix.versions }} echo "PYTHON_VERSION=$python_version" >> $GITHUB_ENV echo "PYTORCH_VERSION=$pytorch_version" >> $GITHUB_ENV if [[ "$pytorch_version" =~ ^2\.(0|1|2)$ ]]; then From 0a4c7b884445dfc602ac450660ef845a243a6b26 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 23:07:55 -0500 Subject: [PATCH 091/141] undo --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e38975cc..f2dd43bf 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -63,7 +63,7 @@ jobs: # Skip if Python version less than minium if [[ "$(printf '%s\n' "$python_version" "$MIN_PYTHON_VERSION" | sort -V | head -n 1)" != "$MIN_PYTHON_VERSION" ]]; then continue; fi - version_matrix+=($python_version, $pytorch_version) + version_matrix+=($python_version,$pytorch_version) done done version_matrix=$(printf '%s\n' "${version_matrix[@]}" | jq -R . | jq -s -c .) 
@@ -80,7 +80,7 @@ jobs: versions: ${{fromJson(needs.get-python-pytorch-versions.outputs.versions)}} steps: - run: | - IFS=', ' read -r python_version pytorch_version <<< ${{ matrix.versions }} + IFS=',' read -r python_version pytorch_version <<< ${{ matrix.versions }} echo "PYTHON_VERSION=$python_version" >> $GITHUB_ENV echo "PYTORCH_VERSION=$pytorch_version" >> $GITHUB_ENV if [[ "$pytorch_version" =~ ^2\.(0|1|2)$ ]]; then From f5365bd41146cea938ea3a874490bedecad1604b Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 23:14:15 -0500 Subject: [PATCH 092/141] add uv lock --check --- .github/workflows/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f2dd43bf..80267fd1 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -17,6 +17,7 @@ jobs: with: version: "0.5.29" enable-cache: true + - run: uv lock --check - run: uv sync - run: uv run --frozen ruff check if: success() || failure() From 3db456619c9da17671f9e4745da0a4be921e5208 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 23:18:37 -0500 Subject: [PATCH 093/141] add docs html build --- .github/workflows/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 80267fd1..958b7147 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -113,3 +113,8 @@ jobs: path: docs/_build/html - id: deployment uses: actions/deploy-pages@v4 + - uses: actions/upload-artifact@v4 + with: + name: docs-html-build + path: docs/_build/html + retention-days: 14 From c2ebf707f42cf6c5d4d4516c6e99bd0d344da21c Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 8 Feb 2025 23:38:24 -0500 Subject: [PATCH 094/141] not deploying docs --- .github/workflows/main.yml | 48 ++++++++++++++------------------------ 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 
958b7147..a73121b8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -26,6 +26,20 @@ jobs: - run: uv run --frozen pyright if: success() || failure() + build-docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.5.29" + - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - uses: actions/upload-artifact@v4 + with: + name: docs-html-build + path: docs/_build/html + retention-days: 14 + ## get-python-pytorch-versions: @@ -80,6 +94,10 @@ jobs: matrix: versions: ${{fromJson(needs.get-python-pytorch-versions.outputs.versions)}} steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.5.29" - run: | IFS=',' read -r python_version pytorch_version <<< ${{ matrix.versions }} echo "PYTHON_VERSION=$python_version" >> $GITHUB_ENV @@ -87,34 +105,4 @@ jobs: if [[ "$pytorch_version" =~ ^2\.(0|1|2)$ ]]; then echo "NUMPY_VERSION=--with \"numpy<2\"" >> $GITHUB_ENV fi - - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 - with: - version: "0.5.29" - run: uv run --python ${{ env.PYTHON_VERSION }} --with torch~=${{ env.PYTORCH_VERSION }} ${{ env.NUMPY_VERSION }} pytest --verbose tests/test_ci.py - - publish-docs: - runs-on: ubuntu-latest - permissions: - pages: write - id-token: write - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - steps: - - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v5 - with: - version: "0.5.29" - - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html - - uses: actions/configure-pages@v5 - - uses: actions/upload-pages-artifact@v3 - with: - path: docs/_build/html - - id: deployment - uses: actions/deploy-pages@v4 - - uses: actions/upload-artifact@v4 - with: - name: 
docs-html-build - path: docs/_build/html - retention-days: 14 From b9655c73fd03c8a1590f122679989140391eb03d Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 01:21:22 -0500 Subject: [PATCH 095/141] edit launch result api --- docs/source/examples/composer.md | 1 + docs/source/index.rst | 7 +++++-- src/torchrunx/launcher.py | 24 ++++++++++-------------- tests/test_func.py | 4 +++- 4 files changed, 19 insertions(+), 17 deletions(-) create mode 100644 docs/source/examples/composer.md diff --git a/docs/source/examples/composer.md b/docs/source/examples/composer.md new file mode 100644 index 00000000..baba39a5 --- /dev/null +++ b/docs/source/examples/composer.md @@ -0,0 +1 @@ +# MosaicML Composer diff --git a/docs/source/index.rst b/docs/source/index.rst index 48becef9..460ba691 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -12,9 +12,12 @@ .. toctree:: :caption: Examples :hidden: - :glob: - ./examples/* + ./examples/transformers.md + ./examples/accelerate.md + ./examples/deepspeed.md + ./examples/lightning.md + ./examples/composer.md .. 
sidebar-links:: :github: diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index ddd8a217..0904e5ea 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -14,10 +14,9 @@ import subprocess import sys from dataclasses import dataclass -from functools import partial, reduce +from functools import partial from logging import Handler from multiprocessing import Event, Process -from operator import add from pathlib import Path from typing import Any, Callable, Literal @@ -265,24 +264,21 @@ def launch( class LaunchResult: """Container for objects returned from workers after successful launches.""" - hostnames: list[str] - return_values: list[list[Any]] - - def by_hostnames(self) -> dict[str, list[Any]]: - """All return values from workers, indexed by host and local rank.""" - return dict(zip(self.hostnames, self.return_values)) - - def by_ranks(self) -> list[Any]: - """All return values from workers, indexed by global rank.""" - return reduce(add, self.return_values) + def __init__(self, hostnames: list[str], return_values: list[list[Any]]) -> None: + """Initialize from corresponding lists of hostnames and worker return values.""" + self.results: dict[str, list[Any]] = dict(zip(hostnames, return_values)) def index(self, hostname: str, rank: int) -> Any: """Get return value from worker by host and local rank.""" - return self.return_values[self.hostnames.index(hostname)][rank] + return self.results[hostname][rank] def rank(self, i: int) -> Any: """Get return value from worker by global rank.""" - return self.by_ranks()[i] + for results_per_host in self.results.values(): + if i < len(results_per_host): + return results_per_host[i] + i -= len(results_per_host) + raise IndexError def _resolve_hostnames(hostnames: list[str] | Literal["auto", "slurm"]) -> list[str]: diff --git a/tests/test_func.py b/tests/test_func.py index 7b3ad7f6..3f4481ca 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -1,4 +1,6 @@ import os +from functools 
import reduce +from operator import add import torch import torch.distributed as dist @@ -13,7 +15,7 @@ def test_launch() -> None: workers_per_host="slurm", ) - result_values = result.by_ranks() + result_values = reduce(add, result.results.values()) t = True for i in range(len(result_values)): From ffc45d1ecef755dbac0e21e255528e2017d442d1 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 12:20:09 -0500 Subject: [PATCH 096/141] moving def launch() to top --- src/torchrunx/launcher.py | 136 +++++++++++++++++++------------------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 0904e5ea..f51ead2d 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -36,6 +36,74 @@ from .utils.logging_server import LoggingServerArgs, start_logging_server +def launch( + func: Callable, + func_args: tuple | None = None, + func_kwargs: dict[str, Any] | None = None, + hostnames: list[str] | Literal["auto", "slurm"] = "auto", + workers_per_host: int | list[int] | Literal["auto", "slurm"] = "auto", + ssh_config_file: str | os.PathLike | None = None, + backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto", + timeout: int = 600, + default_env_vars: tuple[str, ...] = ( + "PATH", + "LD_LIBRARY", + "LIBRARY_PATH", + "PYTHON*", + "CUDA*", + "TORCH*", + "PYTORCH*", + "NCCL*", + ), + extra_env_vars: tuple[str, ...] = (), + env_file: str | os.PathLike | None = None, + handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto", +) -> LaunchResult: + """Launch a distributed PyTorch function on the specified nodes. + + Arguments: + func: Function to run on each worker. + func_args: Positional arguments for ``func``. + func_kwargs: Keyword arguments for ``func``. + hostnames: Nodes on which to launch the function. + Defaults to nodes inferred from a SLURM environment or localhost. + workers_per_host: Number of processes to run per node. 
+ Can specify different counts per node with a list. + ssh_config_file: Path to an SSH configuration file for connecting to nodes. + Defaults to ``~/.ssh/config`` or ``/etc/ssh/ssh_config``. + backend: `Backend `_ + for worker process group. Defaults to NCCL (GPU) or GLOO (CPU). Set `None` to disable. + timeout: Worker process group timeout (seconds). + default_env_vars: Environment variables to copy from the launcher process to workers. + Supports bash pattern matching syntax. + extra_env_vars: Additional user-specified environment variables to copy. + env_file: Path to a file (e.g., `.env`) with additional environment variables to copy. + handler_factory: Function to build logging handlers that process agent and worker logs. + Defaults to an automatic basic logging scheme. + + Raises: + RuntimeError: If there are configuration issues. + AgentFailedError: If an agent fails, e.g. from an OS signal. + WorkerFailedError: If a worker fails, e.g. from a segmentation fault. + Exception: Any exception raised in a worker process is propagated. 
+ """ + return Launcher( + hostnames=hostnames, + workers_per_host=workers_per_host, + ssh_config_file=ssh_config_file, + backend=backend, + timeout=timeout, + default_env_vars=default_env_vars, + extra_env_vars=extra_env_vars, + env_file=env_file, + ).run( + func=func, + func_args=func_args, + func_kwargs=func_kwargs, + handler_factory=handler_factory, + ) + + @dataclass class Launcher: """Useful for sequential invocations or for specifying arguments via CLI.""" @@ -192,74 +260,6 @@ def run( # noqa: C901, PLR0912 return LaunchResult(hostnames=hostnames, return_values=return_values) -def launch( - func: Callable, - func_args: tuple | None = None, - func_kwargs: dict[str, Any] | None = None, - hostnames: list[str] | Literal["auto", "slurm"] = "auto", - workers_per_host: int | list[int] | Literal["auto", "slurm"] = "auto", - ssh_config_file: str | os.PathLike | None = None, - backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto", - timeout: int = 600, - default_env_vars: tuple[str, ...] = ( - "PATH", - "LD_LIBRARY", - "LIBRARY_PATH", - "PYTHON*", - "CUDA*", - "TORCH*", - "PYTORCH*", - "NCCL*", - ), - extra_env_vars: tuple[str, ...] = (), - env_file: str | os.PathLike | None = None, - handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto", -) -> LaunchResult: - """Launch a distributed PyTorch function on the specified nodes. - - Arguments: - func: Function to run on each worker. - func_args: Positional arguments for ``func``. - func_kwargs: Keyword arguments for ``func``. - hostnames: Nodes on which to launch the function. - Defaults to nodes inferred from a SLURM environment or localhost. - workers_per_host: Number of processes to run per node. - Can specify different counts per node with a list. - ssh_config_file: Path to an SSH configuration file for connecting to nodes. - Defaults to ``~/.ssh/config`` or ``/etc/ssh/ssh_config``. - backend: `Backend `_ - for worker process group. Defaults to NCCL (GPU) or GLOO (CPU). 
Set `None` to disable. - timeout: Worker process group timeout (seconds). - default_env_vars: Environment variables to copy from the launcher process to workers. - Supports bash pattern matching syntax. - extra_env_vars: Additional user-specified environment variables to copy. - env_file: Path to a file (e.g., `.env`) with additional environment variables to copy. - handler_factory: Function to build logging handlers that process agent and worker logs. - Defaults to an automatic basic logging scheme. - - Raises: - RuntimeError: If there are configuration issues. - AgentFailedError: If an agent fails, e.g. from an OS signal. - WorkerFailedError: If a worker fails, e.g. from a segmentation fault. - Exception: Any exception raised in a worker process is propagated. - """ - return Launcher( - hostnames=hostnames, - workers_per_host=workers_per_host, - ssh_config_file=ssh_config_file, - backend=backend, - timeout=timeout, - default_env_vars=default_env_vars, - extra_env_vars=extra_env_vars, - env_file=env_file, - ).run( - func=func, - func_args=func_args, - func_kwargs=func_kwargs, - handler_factory=handler_factory, - ) - - @dataclass class LaunchResult: """Container for objects returned from workers after successful launches.""" From c2d51bf2948e842ad1b27795bab20270bacd48ac Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 13:53:18 -0500 Subject: [PATCH 097/141] update launcher API --- src/torchrunx/launcher.py | 91 +++++++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index f51ead2d..274837c0 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -38,8 +38,9 @@ def launch( func: Callable, - func_args: tuple | None = None, - func_kwargs: dict[str, Any] | None = None, + args: tuple | None = None, + kwargs: dict[str, Any] | None = None, + *, hostnames: list[str] | Literal["auto", "slurm"] = "auto", workers_per_host: int | list[int] | 
Literal["auto", "slurm"] = "auto", ssh_config_file: str | os.PathLike | None = None, @@ -57,29 +58,30 @@ def launch( ), extra_env_vars: tuple[str, ...] = (), env_file: str | os.PathLike | None = None, + propagate_exceptions: bool = True, handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto", ) -> LaunchResult: - """Launch a distributed PyTorch function on the specified nodes. + """Distribute and parallelize a function onto specified nodes and workers. Arguments: - func: Function to run on each worker. - func_args: Positional arguments for ``func``. - func_kwargs: Keyword arguments for ``func``. + func: Function to launch on each node and replicate for each worker. + args: Positional arguments for ``func``. + kwargs: Keyword arguments for ``func``. hostnames: Nodes on which to launch the function. - Defaults to nodes inferred from a SLURM environment or localhost. - workers_per_host: Number of processes to run per node. - Can specify different counts per node with a list. + Default: infer from localhost or SLURM. + workers_per_host: Number of processes to run (e.g. # of GPUs) per node. ssh_config_file: Path to an SSH configuration file for connecting to nodes. - Defaults to ``~/.ssh/config`` or ``/etc/ssh/ssh_config``. + Default: ``~/.ssh/config`` or ``/etc/ssh/ssh_config``. backend: `Backend `_ - for worker process group. Defaults to NCCL (GPU) or GLOO (CPU). Set `None` to disable. + for worker process group. Set `None` to disable. Default: NCCL (GPU) or GLOO (CPU). timeout: Worker process group timeout (seconds). default_env_vars: Environment variables to copy from the launcher process to workers. Supports bash pattern matching syntax. extra_env_vars: Additional user-specified environment variables to copy. - env_file: Path to a file (e.g., `.env`) with additional environment variables to copy. - handler_factory: Function to build logging handlers that process agent and worker logs. - Defaults to an automatic basic logging scheme. 
+ env_file: Path to a file (e.g., ``.env``) with additional environment variables to copy. + propagate_exceptions: Raise exceptions from worker processes in the launcher. + If false, raises :obj:`WorkerFailedError` instead. + handler_factory: Function to customize processing of agent and worker logs with handlers. Raises: RuntimeError: If there are configuration issues. @@ -87,20 +89,24 @@ def launch( WorkerFailedError: If a worker fails, e.g. from a segmentation fault. Exception: Any exception raised in a worker process is propagated. """ - return Launcher( - hostnames=hostnames, - workers_per_host=workers_per_host, - ssh_config_file=ssh_config_file, - backend=backend, - timeout=timeout, - default_env_vars=default_env_vars, - extra_env_vars=extra_env_vars, - env_file=env_file, - ).run( - func=func, - func_args=func_args, - func_kwargs=func_kwargs, - handler_factory=handler_factory, + return ( + Launcher( + hostnames=hostnames, + workers_per_host=workers_per_host, + ssh_config_file=ssh_config_file, + backend=backend, + timeout=timeout, + default_env_vars=default_env_vars, + extra_env_vars=extra_env_vars, + env_file=env_file, + propagate_exceptions=propagate_exceptions, + ) + .set_handler_factory(handler_factory) + .run( + func, + args, + kwargs, + ) ) @@ -125,13 +131,24 @@ class Launcher: ) extra_env_vars: tuple[str, ...] = () env_file: str | os.PathLike | None = None + propagate_exceptions: bool = True + + def __post_init__(self) -> None: + """Initializing ``handler_factory``. 
Inclusion in ``__init__`` inhibits CLI generation.""" + self.handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto" + + def set_handler_factory( + self, factory: Callable[[], list[Handler]] | Literal["auto"] | None + ) -> Launcher: + """Setter for log handler factory.""" + self.handler_factory = factory + return self def run( # noqa: C901, PLR0912 self, func: Callable, - func_args: tuple | None = None, - func_kwargs: dict[str, Any] | None = None, - handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto", + args: tuple | None = None, + kwargs: dict[str, Any] | None = None, ) -> LaunchResult: """Run a function using the :mod:`torchrunx.Launcher` configuration.""" if not dist.is_available(): @@ -155,7 +172,7 @@ def run( # noqa: C901, PLR0912 # Start logging server (recieves LogRecords from agents/workers) logging_server_args = LoggingServerArgs( - handler_factory=handler_factory, + handler_factory=self.handler_factory, logging_hostname=launcher_hostname, logging_port=logging_port, hostnames=hostnames, @@ -211,7 +228,7 @@ def run( # noqa: C901, PLR0912 ] payload = LauncherPayload( - fn=partial(func, *(func_args or ()), **(func_kwargs or {})), + fn=partial(func, *(args or ()), **(kwargs or {})), hostnames=hostnames, worker_global_ranks=worker_global_ranks, worker_world_size=sum(workers_per_host), @@ -231,7 +248,9 @@ def run( # noqa: C901, PLR0912 for s in agent_statuses: for value in s.return_values: if isinstance(value, ExceptionFromWorker): - raise value.exception + if self.propagate_exceptions: + raise value.exception + raise WorkerFailedError from value.exception if isinstance(value, WorkerFailedError): raise value @@ -268,9 +287,9 @@ def __init__(self, hostnames: list[str], return_values: list[list[Any]]) -> None """Initialize from corresponding lists of hostnames and worker return values.""" self.results: dict[str, list[Any]] = dict(zip(hostnames, return_values)) - def index(self, hostname: str, rank: int) -> Any: + 
def index(self, hostname: str, locak_rank: int) -> Any: """Get return value from worker by host and local rank.""" - return self.results[hostname][rank] + return self.results[hostname][locak_rank] def rank(self, i: int) -> Any: """Get return value from worker by global rank.""" From 77e12ef08fb61f0f8eead3c57438b794cab7cf46 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 13:53:27 -0500 Subject: [PATCH 098/141] fix tests --- tests/test_ci.py | 9 +++------ tests/test_submitit.py | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/test_ci.py b/tests/test_ci.py index 79d89433..d2ac114c 100644 --- a/tests/test_ci.py +++ b/tests/test_ci.py @@ -31,8 +31,7 @@ def dist_func() -> torch.Tensor: os.environ["TORCHRUNX_DIR"] = tmp r = trx.launch( - func=dist_func, - func_kwargs={}, + dist_func, workers_per_host=2, backend="gloo", # log_dir="./test_logs" ) @@ -51,8 +50,7 @@ def dist_func() -> None: num_workers = 2 trx.launch( - func=dist_func, - func_kwargs={}, + dist_func, workers_per_host=num_workers, backend="gloo", ) @@ -81,8 +79,7 @@ def error_func() -> NoReturn: with pytest.raises(ValueError) as excinfo: # noqa: PT011 trx.launch( - func=error_func, - func_kwargs={}, + error_func, workers_per_host=1, backend="gloo", ) diff --git a/tests/test_submitit.py b/tests/test_submitit.py index b3c3b48c..500665ca 100644 --- a/tests/test_submitit.py +++ b/tests/test_submitit.py @@ -53,7 +53,7 @@ def main() -> None: def launch() -> None: - trx.launch(func=main, func_kwargs={}, hostnames="slurm", workers_per_host="slurm") + trx.launch(main, hostnames="slurm", workers_per_host="slurm") def test_submitit() -> None: From 2331baa84ea7e3c6ba5c5eba7d665e2e022e4f5e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 14:03:37 -0500 Subject: [PATCH 099/141] restructure docs features --- docs/source/advanced.md | 104 -------------------------- docs/source/features/cli.md | 38 ++++++++++ docs/source/features/customization.md | 35 +++++++++ 
docs/source/features/slurm.md | 4 + docs/source/features/workflows.md | 26 +++++++ docs/source/index.rst | 10 ++- 6 files changed, 112 insertions(+), 105 deletions(-) delete mode 100644 docs/source/advanced.md create mode 100644 docs/source/features/cli.md create mode 100644 docs/source/features/customization.md create mode 100644 docs/source/features/slurm.md create mode 100644 docs/source/features/workflows.md diff --git a/docs/source/advanced.md b/docs/source/advanced.md deleted file mode 100644 index a0eedd84..00000000 --- a/docs/source/advanced.md +++ /dev/null @@ -1,104 +0,0 @@ -# Advanced Usage - -## Multiple functions in one script - -We could also launch multiple functions (e.g. train on many GPUs, test on one GPU): - -```python -import torchrunx as trx - -trained_model = trx.launch( - func=train, - hostnames=["node1", "node2"], - workers_per_host=8 -).rank(0) - -accuracy = trx.launch( - func=test, - func_args=(trained_model,), - hostnames=["localhost"], - workers_per_host=1 -).rank(0) - -print(f'Accuracy: {accuracy}') -``` - -{mod}`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. - -## CLI integration - -We can use {mod}`torchrunx.Launcher` to populate arguments from the CLI (e.g. with [tyro](https://brentyi.github.io/tyro/)): - -```python -import torchrunx as trx -import tyro - -def distributed_function(): - pass - -if __name__ == "__main__": - launcher = tyro.cli(trx.Launcher) - launcher.run(distributed_function) -``` - -`python ... 
--help` then results in: - -```bash -╭─ options ─────────────────────────────────────────────╮ -│ -h, --help show this help message and exit │ -│ --hostnames {[STR [STR ...]]}|{auto,slurm} │ -│ (default: auto) │ -│ --workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ -│ (default: auto) │ -│ --ssh-config-file {None}|STR|PATH │ -│ (default: None) │ -│ --backend {None,nccl,gloo,mpi,ucc,auto} │ -│ (default: auto) │ -│ --timeout INT (default: 600) │ -│ --default-env-vars [STR [STR ...]] │ -│ (default: PATH LD_LIBRARY ...) │ -│ --extra-env-vars [STR [STR ...]] │ -│ (default: ) │ -│ --env-file {None}|STR|PATH │ -│ (default: None) │ -╰───────────────────────────────────────────────────────╯ -``` - -## SLURM integration - -By default, the `hostnames` or `workers_per_host` arguments are populated from the current SLURM allocation. If no allocation is detected, we assume 1 machine (localhost) with N workers (num. GPUs or CPUs). -Raises a `RuntimeError` if `hostnames="slurm"` or `workers_per_host="slurm"` but no allocation is detected. - -## Propagating exceptions - -Exceptions that are raised in workers will be raised by the launcher process. - -A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). - -## Environment variables - -Environment variables in the launcher process that match the `default_env_vars` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using `fnmatch`. - -`default_env_vars` can be overriden if desired. This list can be augmented using `extra_env_vars`. Additional environment variables (and more custom bash logic) can be included via the `env_file` argument. Our agents `source` this file. 
- -We also set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. - -## Custom logging - -We forward all logs (i.e. from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to files in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`) and are named by timestamp, hostname, and local_rank. - -{mod}`logging.Handler` objects can be provided via the `handler_factory` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of {mod}`logging.Handler`s to ``handler_factory``. - -We provide some utilities to help: - -```{eval-rst} -.. autofunction:: torchrunx.file_handler -``` - -```{eval-rst} -.. autofunction:: torchrunx.stream_handler -``` - -```{eval-rst} -.. autofunction:: torchrunx.add_filter_to_handler -``` diff --git a/docs/source/features/cli.md b/docs/source/features/cli.md new file mode 100644 index 00000000..bce898f9 --- /dev/null +++ b/docs/source/features/cli.md @@ -0,0 +1,38 @@ +# CLI Integration + +We can use {mod}`torchrunx.Launcher` to populate arguments from the CLI (e.g. with [tyro](https://brentyi.github.io/tyro/)): + +```python +import torchrunx as trx +import tyro + +def distributed_function(): + pass + +if __name__ == "__main__": + launcher = tyro.cli(trx.Launcher) + launcher.run(distributed_function) +``` + +`python ... 
--help` then results in: + +```bash +╭─ options ─────────────────────────────────────────────╮ +│ -h, --help show this help message and exit │ +│ --hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --ssh-config-file {None}|STR|PATH │ +│ (default: None) │ +│ --backend {None,nccl,gloo,mpi,ucc,auto} │ +│ (default: auto) │ +│ --timeout INT (default: 600) │ +│ --default-env-vars [STR [STR ...]] │ +│ (default: PATH LD_LIBRARY ...) │ +│ --extra-env-vars [STR [STR ...]] │ +│ (default: ) │ +│ --env-file {None}|STR|PATH │ +│ (default: None) │ +╰───────────────────────────────────────────────────────╯ +``` diff --git a/docs/source/features/customization.md b/docs/source/features/customization.md new file mode 100644 index 00000000..2c1a0348 --- /dev/null +++ b/docs/source/features/customization.md @@ -0,0 +1,35 @@ +# Customization + +## Propagating exceptions + +Exceptions that are raised in workers will be raised by the launcher process. + +A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). + +## Environment variables + +Environment variables in the launcher process that match the `default_env_vars` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using `fnmatch`. + +`default_env_vars` can be overriden if desired. This list can be augmented using `extra_env_vars`. Additional environment variables (and more custom bash logic) can be included via the `env_file` argument. Our agents `source` this file. + +We also set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. + +## Logging + +We forward all logs (i.e. 
from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to files in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`) and are named by timestamp, hostname, and local_rank. + +{mod}`logging.Handler` objects can be provided via the `handler_factory` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of {mod}`logging.Handler`s to ``handler_factory``. + +We provide some utilities to help: + +```{eval-rst} +.. autofunction:: torchrunx.file_handler +``` + +```{eval-rst} +.. autofunction:: torchrunx.stream_handler +``` + +```{eval-rst} +.. autofunction:: torchrunx.add_filter_to_handler +``` diff --git a/docs/source/features/slurm.md b/docs/source/features/slurm.md new file mode 100644 index 00000000..bffc4512 --- /dev/null +++ b/docs/source/features/slurm.md @@ -0,0 +1,4 @@ +# SLURM Integration + +By default, the `hostnames` or `workers_per_host` arguments are populated from the current SLURM allocation. If no allocation is detected, we assume 1 machine (localhost) with N workers (num. GPUs or CPUs). +Raises a `RuntimeError` if `hostnames="slurm"` or `workers_per_host="slurm"` but no allocation is detected. diff --git a/docs/source/features/workflows.md b/docs/source/features/workflows.md new file mode 100644 index 00000000..46e5d032 --- /dev/null +++ b/docs/source/features/workflows.md @@ -0,0 +1,26 @@ +# Workflows + +## Multiple functions in one script + +We could also launch multiple functions (e.g. 
train on many GPUs, test on one GPU): + +```python +import torchrunx as trx + +trained_model = trx.launch( + func=train, + hostnames=["node1", "node2"], + workers_per_host=8 +).rank(0) + +accuracy = trx.launch( + func=test, + func_args=(trained_model,), + hostnames=["localhost"], + workers_per_host=1 +).rank(0) + +print(f'Accuracy: {accuracy}') +``` + +{mod}`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. diff --git a/docs/source/index.rst b/docs/source/index.rst index 460ba691..cbeb9236 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -5,10 +5,18 @@ :hidden: api - advanced how_it_works contributing +.. toctree:: + :caption: Features + :hidden: + + ./features/customization.md + ./features/workflows.md + ./features/cli.md + ./features/slurm.md + .. toctree:: :caption: Examples :hidden: From 0e5ec8a5d134f320e21df508523f5f8cc5f68a3d Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 15:02:55 -0500 Subject: [PATCH 100/141] rename to logging utilities; log to timestamp folder --- src/torchrunx/__init__.py | 2 +- src/torchrunx/agent.py | 2 +- src/torchrunx/launcher.py | 5 +++-- .../{logging_server.py => logging_utilities.py} | 12 +++++------- src/torchrunx/worker.py | 2 +- 5 files changed, 11 insertions(+), 12 deletions(-) rename src/torchrunx/utils/{logging_server.py => logging_utilities.py} (96%) diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index 2590ef41..f7fbef8d 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -2,7 +2,7 @@ from .launcher import Launcher, LaunchResult, launch from .utils.errors import AgentFailedError, WorkerFailedError -from .utils.logging_server import add_filter_to_handler, file_handler, stream_handler +from .utils.logging_utilities import add_filter_to_handler, file_handler, stream_handler __all__ = [ "AgentFailedError", diff --git a/src/torchrunx/agent.py 
b/src/torchrunx/agent.py index 811a9516..264a64b8 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -19,7 +19,7 @@ LauncherAgentGroup, get_open_port, ) -from .utils.logging_server import log_records_to_socket, redirect_stdio_to_logger +from .utils.logging_utilities import log_records_to_socket, redirect_stdio_to_logger from .worker import WorkerArgs, worker_entrypoint diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 274837c0..f1d6434f 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -22,6 +22,7 @@ import fabric import torch.distributed as dist +from typing_extensions import Self from .utils.comm import ( LauncherAgentGroup, @@ -33,7 +34,7 @@ ExceptionFromWorker, WorkerFailedError, ) -from .utils.logging_server import LoggingServerArgs, start_logging_server +from .utils.logging_utilities import LoggingServerArgs, start_logging_server def launch( @@ -139,7 +140,7 @@ def __post_init__(self) -> None: def set_handler_factory( self, factory: Callable[[], list[Handler]] | Literal["auto"] | None - ) -> Launcher: + ) -> Self: """Setter for log handler factory.""" self.handler_factory = factory return self diff --git a/src/torchrunx/utils/logging_server.py b/src/torchrunx/utils/logging_utilities.py similarity index 96% rename from src/torchrunx/utils/logging_server.py rename to src/torchrunx/utils/logging_utilities.py index 6e7ea404..ca7b2ded 100644 --- a/src/torchrunx/utils/logging_server.py +++ b/src/torchrunx/utils/logging_utilities.py @@ -102,20 +102,18 @@ def file_handlers( ) -> list[Handler]: """Handler builder function for writing logs for all workers/agents to a directory. - Files are named with timestamp, hostname, and the local_rank (for workers). + Files are named with hostname and the local_rank (for workers). 
""" handlers = [] - Path(log_dir).mkdir(parents=True, exist_ok=True) timestamp = datetime.datetime.now().isoformat(timespec="seconds") + log_dir = Path(log_dir) / timestamp + log_dir.mkdir(parents=True, exist_ok=True) for hostname, num_workers in zip(hostnames, workers_per_host): for local_rank in [None, *range(num_workers)]: - file_path = ( - f"{log_dir}/{timestamp}-{hostname}" - + (f"[{local_rank}]" if local_rank is not None else "") - + ".log" - ) + local_rank_str = f"[{local_rank}]" if local_rank is not None else "" + file_path = log_dir / f"{hostname}{local_rank_str}.log" handlers.append(file_handler(hostname, local_rank, file_path, log_level=log_level)) return handlers diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index 897346a3..cb246f5b 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -15,7 +15,7 @@ import torch.distributed as dist from .utils.errors import ExceptionFromWorker -from .utils.logging_server import log_records_to_socket, redirect_stdio_to_logger +from .utils.logging_utilities import log_records_to_socket, redirect_stdio_to_logger __all__ = ["WorkerArgs", "worker_entrypoint"] From 26414e68aad7580f78b2288fe28abff9c7f6d39a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 17:32:07 -0500 Subject: [PATCH 101/141] update slurm env vars --- src/torchrunx/utils/environment.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index 0df28e8c..2d3e9f67 100644 --- a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -12,17 +12,17 @@ def in_slurm_job() -> bool: """Check if current process is running in a Slurm allocation.""" - return "SLURM_JOB_ID" in os.environ + return "SLURM_JOB_ID" in os.environ or "SLURM_JOBID" in os.environ def slurm_hosts() -> list[str]: """Retrieves hostnames of Slurm-allocated nodes.""" - # TODO: sanity check SLURM variables, commands if not in_slurm_job(): msg = 
"Not in a SLURM job" raise RuntimeError(msg) + return ( - subprocess.check_output(["scontrol", "show", "hostnames", os.environ["SLURM_JOB_NODELIST"]]) + subprocess.check_output(["scontrol", "show", "hostnames"]) .decode() .strip() .split("\n") From 76e1d723a11e9522e4c667c9fe0fb762ec4f3d13 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 9 Feb 2025 18:24:20 -0500 Subject: [PATCH 102/141] manually detect number of gpus (workers) per host --- src/torchrunx/launcher.py | 62 ++++++++++++++++++++---------- src/torchrunx/utils/environment.py | 48 ++++------------------- tests/test_func.py | 1 - tests/test_submitit.py | 2 +- 4 files changed, 50 insertions(+), 63 deletions(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index f1d6434f..a60b936d 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -29,7 +29,7 @@ LauncherPayload, get_open_port, ) -from .utils.environment import auto_hosts, auto_workers, slurm_hosts, slurm_workers +from .utils.environment import auto_hosts, slurm_hosts from .utils.errors import ( ExceptionFromWorker, WorkerFailedError, @@ -43,7 +43,7 @@ def launch( kwargs: dict[str, Any] | None = None, *, hostnames: list[str] | Literal["auto", "slurm"] = "auto", - workers_per_host: int | list[int] | Literal["auto", "slurm"] = "auto", + workers_per_host: int | list[int] | Literal["auto"] = "auto", ssh_config_file: str | os.PathLike | None = None, backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto", timeout: int = 600, @@ -71,6 +71,7 @@ def launch( hostnames: Nodes on which to launch the function. Default: infer from localhost or SLURM. workers_per_host: Number of processes to run (e.g. # of GPUs) per node. + Default: use number of GPUs on each host. ssh_config_file: Path to an SSH configuration file for connecting to nodes. Default: ``~/.ssh/config`` or ``/etc/ssh/ssh_config``. backend: `Backend `_ @@ -86,9 +87,10 @@ def launch( Raises: RuntimeError: If there are configuration issues. 
- AgentFailedError: If an agent fails, e.g. from an OS signal. - WorkerFailedError: If a worker fails, e.g. from a segmentation fault. Exception: Any exception raised in a worker process is propagated. + WorkerFailedError: If a worker fails (e.g. from a segmentation fault) + or raises an exception and ``propagate_exceptions=False``. + AgentFailedError: If an agent fails, e.g. from an OS signal. """ return ( Launcher( @@ -116,7 +118,7 @@ class Launcher: """Useful for sequential invocations or for specifying arguments via CLI.""" hostnames: list[str] | Literal["auto", "slurm"] = "auto" - workers_per_host: int | list[int] | Literal["auto", "slurm"] = "auto" + workers_per_host: int | list[int] | Literal["auto"] = "auto" ssh_config_file: str | os.PathLike | None = None backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto" timeout: int = 600 @@ -156,8 +158,8 @@ def run( # noqa: C901, PLR0912 msg = "The torch.distributed package is not available." raise RuntimeError(msg) - hostnames = _resolve_hostnames(self.hostnames) - workers_per_host = _resolve_workers_per_host(self.workers_per_host, len(hostnames)) + hostnames: list[str] = _resolve_hostnames(self.hostnames) + workers_per_host: list[int] = _resolve_workers_per_host(hostnames, self.workers_per_host) launcher_hostname = socket.getfqdn() launcher_port = get_open_port() @@ -310,19 +312,23 @@ def _resolve_hostnames(hostnames: list[str] | Literal["auto", "slurm"]) -> list[ def _resolve_workers_per_host( - workers_per_host: int | list[int] | Literal["auto", "slurm"], - num_hosts: int, + hostnames: list[str], + workers_per_host: int | list[int] | Literal["auto"], ) -> list[int]: - if workers_per_host == "auto": - workers_per_host = auto_workers() - elif workers_per_host == "slurm": - workers_per_host = slurm_workers() - if isinstance(workers_per_host, int): - workers_per_host = [workers_per_host] * num_hosts - elif len(workers_per_host) != num_hosts: - msg = "len(workers_per_host) != len(hostnames)" - raise 
ValueError(msg) + return [workers_per_host] * len(hostnames) + + if workers_per_host == "auto": + python = shlex.quote(sys.executable) + command = f"{python} -c \"import torch; print(torch.cuda.device_count(), end='')\"" + gpus_per_host = [ + int(_execute_command(command, hostname, return_stdout_stderr=True)[0]) + for hostname in hostnames + ] + if any(g == 0 for g in gpus_per_host): + msg = 'workers_per_host="auto", but no GPUs detected on at least one host.' + raise RuntimeError(msg) + return gpus_per_host return workers_per_host @@ -372,8 +378,10 @@ def _build_launch_command( def _execute_command( command: str, hostname: str, + *, ssh_config_file: str | os.PathLike | None = None, -) -> None: + return_stdout_stderr: bool = False, +) -> tuple[str, str]: is_localhost = True _hostname_or_ip = hostname try: @@ -389,7 +397,13 @@ def _execute_command( if is_localhost: # S602: subprocess.Popen is called with shell=True (https://docs.python.org/3.9/library/subprocess.html#security-considerations) # Made sure to shlex.quote arguments in build_command to prevent shell injection - subprocess.Popen(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # noqa: S602 + process = subprocess.Popen( # noqa: S602 + command, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + + if return_stdout_stderr: + stdout, stderr = process.communicate() + return stdout, stderr else: runtime_ssh_path = ssh_config_file if isinstance(ssh_config_file, os.PathLike): @@ -399,4 +413,10 @@ def _execute_command( host=hostname, config=fabric.Config(runtime_ssh_path=runtime_ssh_path), ) as conn: - conn.run(f"{command} >> /dev/null 2>&1 &", asynchronous=True) + promise = conn.run(command, asynchronous=True, hide=True) + + if return_stdout_stderr: + results = promise.join() + return results.stdout, results.stderr + + return ("", "") diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index 2d3e9f67..ca5f0e7c 100644 --- 
a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -2,12 +2,17 @@ from __future__ import annotations -__all__ = ["auto_hosts", "auto_workers", "in_slurm_job", "slurm_hosts", "slurm_workers"] +__all__ = ["auto_hosts", "in_slurm_job", "slurm_hosts"] import os import subprocess -import torch + +def auto_hosts() -> list[str]: + """Automatically determine hostnames to launch to.""" + if in_slurm_job(): + return slurm_hosts() + return ["localhost"] def in_slurm_job() -> bool: @@ -21,41 +26,4 @@ def slurm_hosts() -> list[str]: msg = "Not in a SLURM job" raise RuntimeError(msg) - return ( - subprocess.check_output(["scontrol", "show", "hostnames"]) - .decode() - .strip() - .split("\n") - ) - - -def slurm_workers() -> int: - """Determines number of workers per node in current Slurm allocation.""" - # TODO: sanity check SLURM variables, commands - if not in_slurm_job(): - msg = "Not in a SLURM job" - raise RuntimeError(msg) - - if "SLURM_JOB_GPUS" in os.environ: - # TODO: is it possible to allocate uneven GPUs across nodes? 
- return len(os.environ["SLURM_JOB_GPUS"].split(",")) - if "SLURM_GPUS_PER_NODE" in os.environ: - return int(os.environ["SLURM_GPUS_PER_NODE"]) - - return int(os.environ["SLURM_CPUS_ON_NODE"]) - - -def auto_hosts() -> list[str]: - """Automatically determine hostnames to launch to.""" - if in_slurm_job(): - return slurm_hosts() - - return ["localhost"] - - -def auto_workers() -> int: - """Automatically determine workers per host from SLURM or based on GPU/CPU count.""" - if in_slurm_job(): - return slurm_workers() - - return torch.cuda.device_count() or os.cpu_count() or 1 + return subprocess.check_output(["scontrol", "show", "hostnames"]).decode().strip().split("\n") diff --git a/tests/test_func.py b/tests/test_func.py index 3f4481ca..f0474b98 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -12,7 +12,6 @@ def test_launch() -> None: result = trx.launch( func=simple_matmul, hostnames="slurm", - workers_per_host="slurm", ) result_values = reduce(add, result.results.values()) diff --git a/tests/test_submitit.py b/tests/test_submitit.py index 500665ca..1f639df3 100644 --- a/tests/test_submitit.py +++ b/tests/test_submitit.py @@ -53,7 +53,7 @@ def main() -> None: def launch() -> None: - trx.launch(main, hostnames="slurm", workers_per_host="slurm") + trx.launch(main, hostnames="slurm") def test_submitit() -> None: From ab8cbd4bb9f960e830bfc2220cb6917140bd292a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 10 Feb 2025 01:20:07 -0500 Subject: [PATCH 103/141] switch to sphinx.ext.autodoc --- .gitignore | 1 + docs/conf.py | 18 +++++----- docs/source/api.md | 8 ++--- docs/source/contributing.md | 4 --- docs/source/features/customization.md | 6 ++-- pyproject.toml | 2 +- src/torchrunx/__init__.py | 4 --- src/torchrunx/agent.py | 2 +- src/torchrunx/launcher.py | 30 ++++++++++------ src/torchrunx/utils/__init__.py | 4 +++ .../{logging_utilities.py => logging.py} | 2 +- src/torchrunx/worker.py | 2 +- uv.lock | 36 +++---------------- 13 files changed, 47 
insertions(+), 72 deletions(-) delete mode 100644 docs/source/contributing.md rename src/torchrunx/utils/{logging_utilities.py => logging.py} (99%) diff --git a/.gitignore b/.gitignore index 2566ec82..952cfa2c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ docs/source/README.md +docs/source/contributing.md torchrunx_logs/ .pixi/ .ruff_cache/ diff --git a/docs/conf.py b/docs/conf.py index 04a756fb..8de5c006 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,11 +1,12 @@ """Configuration file for the Sphinx documentation builder.""" - from glob import glob import shutil shutil.copyfile("../README.md", "source/README.md") +shutil.copyfile("../CONTRIBUTING.md", "source/contributing.md") project = "torchrunx" +copyright = 'Apoorv Khandelwal and Peter Curtin' github_username = "apoorvkh" github_repository = "torchrunx" html_theme = "furo" @@ -14,7 +15,7 @@ html_extra_path = list(glob("source/examples/scripts/*.py")) extensions = [ - "autodoc2", + "sphinx.ext.autodoc", "myst_parser", # support markdown "sphinx.ext.intersphinx", # link to external docs "sphinx.ext.napoleon", # for google style docstrings @@ -23,14 +24,11 @@ "sphinx_toolbox.github", ] -maximum_signature_line_length = 100 -autodoc2_render_plugin = "myst" -intersphinx_mapping = { - "python": ("https://docs.python.org/3.9", None), - "fabric": ("https://docs.fabfile.org/en/stable", None), - 'torch': ('https://pytorch.org/docs/stable', None), - "numpy": ("https://numpy.org/doc/stable", None), -} +autodoc_member_order = "bysource" +autodoc_typehints = "description" +autodoc_typehints_description_target = "documented" + +intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} from docs.linkcode_github import generate_linkcode_resolve_fn linkcode_resolve = generate_linkcode_resolve_fn(project, github_username, github_repository) diff --git a/docs/source/api.md b/docs/source/api.md index d4774af0..95be4c9a 100644 --- a/docs/source/api.md +++ b/docs/source/api.md @@ -1,10 +1,10 @@ # API 
```{eval-rst} -.. autofunction:: torchrunx.launch +.. autofunction:: torchrunx.launch(func, args, kwargs, ...) ``` -We provide the {mod}`torchrunx.Launcher` class as an alias to {mod}`torchrunx.launch`. +We provide the {obj}`torchrunx.Launcher` class as an alias to {obj}`torchrunx.launch`. ```{eval-rst} .. autoclass:: torchrunx.Launcher @@ -21,9 +21,9 @@ We provide the {mod}`torchrunx.Launcher` class as an alias to {mod}`torchrunx.la ## Exceptions ```{eval-rst} -.. autoclass:: torchrunx.AgentFailedError +.. autoexception:: torchrunx.AgentFailedError ``` ```{eval-rst} -.. autoclass:: torchrunx.WorkerFailedError +.. autoexception:: torchrunx.WorkerFailedError ``` diff --git a/docs/source/contributing.md b/docs/source/contributing.md deleted file mode 100644 index 5d3d3c56..00000000 --- a/docs/source/contributing.md +++ /dev/null @@ -1,4 +0,0 @@ -```{eval-rst} -.. include:: ../../CONTRIBUTING.md - :parser: myst_parser.sphinx_ -``` diff --git a/docs/source/features/customization.md b/docs/source/features/customization.md index 2c1a0348..cdd95708 100644 --- a/docs/source/features/customization.md +++ b/docs/source/features/customization.md @@ -23,13 +23,13 @@ We forward all logs (i.e. from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.st We provide some utilities to help: ```{eval-rst} -.. autofunction:: torchrunx.file_handler +.. autofunction:: torchrunx.utils.file_handler ``` ```{eval-rst} -.. autofunction:: torchrunx.stream_handler +.. autofunction:: torchrunx.utils.stream_handler ``` ```{eval-rst} -.. autofunction:: torchrunx.add_filter_to_handler +.. 
autofunction:: torchrunx.utils.add_filter_to_handler ``` diff --git a/pyproject.toml b/pyproject.toml index 58b2399a..8c781a71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ [dependency-groups] dev = ["ruff==0.9.5", "pyright[nodejs]==1.1.393", "pytest==8.3.4"] test-extras = ["submitit", "transformers"] -docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-autodoc2==0.5.0", "sphinx-toolbox==3.8.1"] +docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-toolbox==3.8.2"] [tool.ruff] diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index f7fbef8d..3856f589 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -2,15 +2,11 @@ from .launcher import Launcher, LaunchResult, launch from .utils.errors import AgentFailedError, WorkerFailedError -from .utils.logging_utilities import add_filter_to_handler, file_handler, stream_handler __all__ = [ "AgentFailedError", "LaunchResult", "Launcher", "WorkerFailedError", - "add_filter_to_handler", - "file_handler", "launch", - "stream_handler", ] diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index 264a64b8..d21dbf3a 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -19,7 +19,7 @@ LauncherAgentGroup, get_open_port, ) -from .utils.logging_utilities import log_records_to_socket, redirect_stdio_to_logger +from .utils.logging import log_records_to_socket, redirect_stdio_to_logger from .worker import WorkerArgs, worker_entrypoint diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index a60b936d..adb7dd17 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -34,7 +34,7 @@ ExceptionFromWorker, WorkerFailedError, ) -from .utils.logging_utilities import LoggingServerArgs, start_logging_server +from .utils.logging import LoggingServerArgs, start_logging_server def launch( @@ -65,25 +65,33 @@ def launch( """Distribute and parallelize a function onto specified nodes 
and workers. Arguments: - func: Function to launch on each node and replicate for each worker. - args: Positional arguments for ``func``. - kwargs: Keyword arguments for ``func``. + func: Function to replicate on each node/worker. + args: Positional arguments for ``func``. Default: :py:obj:`None`. + kwargs: Keyword arguments for ``func``. Default: :py:obj:`None`. hostnames: Nodes on which to launch the function. - Default: infer from localhost or SLURM. + Default: ``"auto"`` (infer from localhost or SLURM). workers_per_host: Number of processes to run (e.g. # of GPUs) per node. - Default: use number of GPUs on each host. + Default: ``"auto"`` (number of GPUs per host). ssh_config_file: Path to an SSH configuration file for connecting to nodes. - Default: ``~/.ssh/config`` or ``/etc/ssh/ssh_config``. + Default: ``"~/.ssh/config"`` or ``"/etc/ssh/ssh_config"``. backend: `Backend `_ - for worker process group. Set `None` to disable. Default: NCCL (GPU) or GLOO (CPU). + for worker process group. Set `None` to disable. + Default: ``"auto"`` (NCCL if GPU or GLOO if CPU). timeout: Worker process group timeout (seconds). + Default: ``600``. default_env_vars: Environment variables to copy from the launcher process to workers. Supports bash pattern matching syntax. + Default: ``("PATH", "LD_LIBRARY", "LIBRARY_PATH", "PYTHON*", "CUDA*", "TORCH*", + "PYTORCH*", "NCCL*")``. extra_env_vars: Additional user-specified environment variables to copy. + Default: ``()``. env_file: Path to a file (e.g., ``.env``) with additional environment variables to copy. + Default: :py:obj:`None`. propagate_exceptions: Raise exceptions from worker processes in the launcher. - If false, raises :obj:`WorkerFailedError` instead. + If false, raises :exc:`WorkerFailedError` instead. + Default: :py:obj:`True`. handler_factory: Function to customize processing of agent and worker logs with handlers. + Default: ``"auto"`` (see `custom logging `_). Raises: RuntimeError: If there are configuration issues. 
@@ -115,7 +123,7 @@ def launch( @dataclass class Launcher: - """Useful for sequential invocations or for specifying arguments via CLI.""" + """Alias class for :func:`launch`. Refer to that function for documentation.""" hostnames: list[str] | Literal["auto", "slurm"] = "auto" workers_per_host: int | list[int] | Literal["auto"] = "auto" @@ -153,7 +161,7 @@ def run( # noqa: C901, PLR0912 args: tuple | None = None, kwargs: dict[str, Any] | None = None, ) -> LaunchResult: - """Run a function using the :mod:`torchrunx.Launcher` configuration.""" + """Launch a function using class configuration.""" if not dist.is_available(): msg = "The torch.distributed package is not available." raise RuntimeError(msg) diff --git a/src/torchrunx/utils/__init__.py b/src/torchrunx/utils/__init__.py index b2aa5714..dc4af98f 100644 --- a/src/torchrunx/utils/__init__.py +++ b/src/torchrunx/utils/__init__.py @@ -1 +1,5 @@ """Utility classes and functions.""" + +from .logging import add_filter_to_handler, file_handler, stream_handler + +__all__ = ["add_filter_to_handler", "file_handler", "stream_handler"] diff --git a/src/torchrunx/utils/logging_utilities.py b/src/torchrunx/utils/logging.py similarity index 99% rename from src/torchrunx/utils/logging_utilities.py rename to src/torchrunx/utils/logging.py index ca7b2ded..64c20e18 100644 --- a/src/torchrunx/utils/logging_utilities.py +++ b/src/torchrunx/utils/logging.py @@ -1,4 +1,4 @@ -"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" +"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" # noqa: A005 from __future__ import annotations diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index cb246f5b..e7307520 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -15,7 +15,7 @@ import torch.distributed as dist from .utils.errors import ExceptionFromWorker -from .utils.logging_utilities import log_records_to_socket, 
redirect_stdio_to_logger +from .utils.logging import log_records_to_socket, redirect_stdio_to_logger __all__ = ["WorkerArgs", "worker_entrypoint"] diff --git a/uv.lock b/uv.lock index 4277ed3c..e2442610 100644 --- a/uv.lock +++ b/uv.lock @@ -44,18 +44,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/9f/fa9971d2a0c6fef64c87ba362a493a4f230eff4ea8dfb9f4c7cbdf71892e/apeye_core-1.1.5-py3-none-any.whl", hash = "sha256:dc27a93f8c9e246b3b238c5ea51edf6115ab2618ef029b9f2d9a190ec8228fbf", size = 99286 }, ] -[[package]] -name = "astroid" -version = "3.3.8" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/80/c5/5c83c48bbf547f3dd8b587529db7cf5a265a3368b33e85e76af8ff6061d3/astroid-3.3.8.tar.gz", hash = "sha256:a88c7994f914a4ea8572fac479459f4955eeccc877be3f2d959a33273b0cf40b", size = 398196 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/07/28/0bc8a17d6cd4cc3c79ae41b7105a2b9a327c110e5ddd37a8a27b29a5c8a2/astroid-3.3.8-py3-none-any.whl", hash = "sha256:187ccc0c248bfbba564826c26f070494f7bc964fd286b6d9fff4420e55de828c", size = 275153 }, -] - [[package]] name = "autodocsumm" version = "0.2.14" @@ -1518,20 +1506,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/f3/e0a4ce49da4b6f4e4ce84b3c39a0677831884cb9d8a87ccbf1e9e56e53ac/sphinx_autodoc_typehints-2.3.0-py3-none-any.whl", hash = "sha256:3098e2c6d0ba99eacd013eb06861acc9b51c6e595be86ab05c08ee5506ac0c67", size = 19836 }, ] -[[package]] -name = "sphinx-autodoc2" -version = "0.5.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "astroid" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/17/5f/5350046d1aa1a56b063ae08b9ad871025335c9d55fe2372896ea48711da9/sphinx_autodoc2-0.5.0.tar.gz", hash = 
"sha256:7d76044aa81d6af74447080182b6868c7eb066874edc835e8ddf810735b6565a", size = 115077 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/19/e6/48d47961bbdae755ba9c17dfc65d89356312c67668dcb36c87cfadfa1964/sphinx_autodoc2-0.5.0-py3-none-any.whl", hash = "sha256:e867013b1512f9d6d7e6f6799f8b537d6884462acd118ef361f3f619a60b5c9e", size = 43385 }, -] - [[package]] name = "sphinx-basic-ng" version = "1.0.0b2" @@ -1588,7 +1562,7 @@ wheels = [ [[package]] name = "sphinx-toolbox" -version = "3.8.1" +version = "3.8.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "apeye" }, @@ -1609,9 +1583,9 @@ dependencies = [ { name = "tabulate" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/30/80/f837e85c8c216cdeef9b60393e4b00c9092a1e3d734106e0021abbf5930c/sphinx_toolbox-3.8.1.tar.gz", hash = "sha256:a4b39a6ea24fc8f10e24f052199bda17837a0bf4c54163a56f521552395f5e1a", size = 111977 } +sdist = { url = "https://files.pythonhosted.org/packages/39/91/61445ccb49f653f706230daf874f0fb4385c748f8e221f44881717c80c1d/sphinx_toolbox-3.8.2.tar.gz", hash = "sha256:2d65b9cee1d313c84cdb01317764791b1bdfbd6a1e6aa056e464137d18a6b9eb", size = 111998 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/d6/2a28ee4cbc158ae65afb2cfcb6895ef54d972ce1e167f8a63c135b14b080/sphinx_toolbox-3.8.1-py3-none-any.whl", hash = "sha256:53d8e77dd79e807d9ef18590c4b2960a5aa3c147415054b04c31a91afed8b88b", size = 194621 }, + { url = "https://files.pythonhosted.org/packages/e3/c4/be739b65467ff53bef26fc36478dbb0e01e289375983338d2247a8c2f87e/sphinx_toolbox-3.8.2-py3-none-any.whl", hash = "sha256:c65d03274f07a6bfb3f56123ae40e1e003a49709b7cda9b75d9e53e569d34581", size = 194693 }, ] [[package]] @@ -1848,7 +1822,6 @@ docs = [ { name = "furo" }, { name = "myst-parser" }, { name = "sphinx" }, - { name = "sphinx-autodoc2" }, { name = "sphinx-toolbox" }, ] test-extras = [ @@ -1874,8 +1847,7 @@ docs = [ { name = "furo", specifier = 
"==2024.8.6" }, { name = "myst-parser", specifier = "==3.0.1" }, { name = "sphinx", specifier = "==7.4.7" }, - { name = "sphinx-autodoc2", specifier = "==0.5.0" }, - { name = "sphinx-toolbox", specifier = "==3.8.1" }, + { name = "sphinx-toolbox", specifier = "==3.8.2" }, ] test-extras = [ { name = "submitit" }, From cb4cd4a2be3042203f886b586561ca2e313b89ee Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Tue, 11 Feb 2025 00:02:19 -0500 Subject: [PATCH 104/141] fix ci test: new log structure --- tests/test_ci.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/tests/test_ci.py b/tests/test_ci.py index d2ac114c..98cce6ff 100644 --- a/tests/test_ci.py +++ b/tests/test_ci.py @@ -1,5 +1,7 @@ +import datetime import os import tempfile +import time from pathlib import Path from typing import NoReturn @@ -49,18 +51,33 @@ def dist_func() -> None: num_workers = 2 + before_timestamp = datetime.datetime.now() + + time.sleep(1) + trx.launch( dist_func, workers_per_host=num_workers, backend="gloo", ) - log_files = next(os.walk(tmp), (None, None, []))[2] + after_timestamp = datetime.datetime.now() + + log_dirs = next(os.walk(tmp), (None, [], None))[1] + + assert len(log_dirs) == 1 + + # this should error if mis-formatted + log_timestamp = datetime.datetime.fromisoformat(log_dirs[0]) + + assert before_timestamp <= log_timestamp <= after_timestamp + + log_files = next(os.walk(f"{tmp}/{log_dirs[0]}"), (None, None, []))[2] assert len(log_files) == num_workers + 1 for file in log_files: - with Path(f"{tmp}/{file}").open() as f: + with Path(f"{tmp}/{log_dirs[0]}/{file}").open() as f: contents = f.read() print(contents) if file.endswith("[0].log"): From b6ff53e1f9ea7e1fc52186e7c86f610ec3fa5080 Mon Sep 17 00:00:00 2001 From: "peter_curtin@brown.edu" Date: Wed, 12 Feb 2025 14:45:18 -0500 Subject: [PATCH 105/141] refactor deepspeed and lightning scripts --- .../examples/scripts/accelerate_train.py | 1 - .../examples/scripts/deepspeed_train.py | 156 
+++++++++++------ .../examples/scripts/lightning_train.py | 165 ++++++++++++------ 3 files changed, 216 insertions(+), 106 deletions(-) diff --git a/docs/source/examples/scripts/accelerate_train.py b/docs/source/examples/scripts/accelerate_train.py index 60047b67..d4e1ca55 100644 --- a/docs/source/examples/scripts/accelerate_train.py +++ b/docs/source/examples/scripts/accelerate_train.py @@ -3,7 +3,6 @@ # dependencies = [ # "accelerate", # "datasets", -# "tensorboard", # "torch", # "torchrunx", # "transformers", diff --git a/docs/source/examples/scripts/deepspeed_train.py b/docs/source/examples/scripts/deepspeed_train.py index 0d3ccf2a..45e19dff 100644 --- a/docs/source/examples/scripts/deepspeed_train.py +++ b/docs/source/examples/scripts/deepspeed_train.py @@ -1,88 +1,140 @@ +# /// script +# requires-python = ">=3.12" +# dependencies = [ +# "deepspeed", +# "datasets", +# "tensorboard", +# "torch", +# "torchrunx", +# "transformers", +# "tyro", +# ] +# /// + +import argparse +import functools +import os from dataclasses import dataclass -from pathlib import Path +from typing import Annotated import deepspeed +from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint import torch from datasets import load_dataset -from torch import nn from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers import AutoModelForCausalLM, PreTrainedModel, AutoTokenizer, AutoConfig import torchrunx +import tyro -class GPT2CausalLMDataset(Dataset): - def __init__(self, text_dataset): - self.dataset = text_dataset - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - self.tokenizer.pad_token = self.tokenizer.eos_token - self.max_length = 1024 - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - encoded = self.tokenizer( - self.dataset[idx]["text"], - max_length=self.max_length, - truncation=True, - padding="max_length", - return_tensors="pt", - ) +@dataclass +class 
ModelConfig: + name: str - input_ids = encoded.input_ids.squeeze() - attention_mask = encoded.attention_mask.squeeze() - labels = input_ids.clone() - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "labels": labels, - } +@dataclass +class DatasetConfig: + path: str + name: str | None = None + split: str | None = None + text_column: str = "text" + num_samples: int | None = None @dataclass -class DSPArgs: +class DeepSpeedArgs: deepspeed_config: str - # train_batch_size: int - # batch_size: int + local_rank: int | None = None + + +def load_training_data( + tokenizer_name: str, + dataset_config: DatasetConfig, +) -> Dataset: + # Load dataset + + dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + if dataset_config.num_samples is not None: + dataset = dataset.select(range(dataset_config.num_samples)) + + # Build tokenizer + + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + tokenize_fn = functools.partial( + tokenizer, + max_length=tokenizer.model_max_length, + truncation=True, + padding="max_length", + ) + + # Tokenize dataset + return dataset.map( + tokenize_fn, + batched=True, + input_columns=[dataset_config.text_column], + remove_columns=[dataset_config.text_column], + ).map(lambda x: {"labels": x["input_ids"]}) -def train(): - model = AutoModelForCausalLM.from_pretrained("gpt2") - # optimizer = torch.optim.Adam(model.parameters()) - wikitext_train = load_dataset("Salesforce/wikitext", "wikitext-2-v1", split="train") - train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) - loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) +def train( + model: PreTrainedModel, + train_dataset: Dataset, + deepspeed_args: DeepSpeedArgs +) -> str: - model_engine, optimizer, _, _ = deepspeed.initialize( - 
args=DSPArgs(deepspeed_config="dsp_config.json"), + deepspeed_args.local_rank = int(os.environ["LOCAL_RANK"]) + + model_engine, _, loader, _ = deepspeed.initialize( + args=deepspeed_args, model=model, model_parameters=model.parameters(), + training_data=train_dataset ) - model.train() + model_engine.train() for batch_idx, batch in enumerate(loader): if batch_idx == 10: break - print(f"Step {batch_idx}") - - device_batch = {k: v.to(model.device) for k, v in batch.items()} - - model.zero_grad() + device_batch = {k: torch.stack(v, dim=0).to(model_engine.device) for k, v in batch.items()} + model_engine.zero_grad() loss = model_engine(**device_batch).loss + print(f"Step {batch_idx}, loss: {loss.item()}", flush=True, end="") model_engine.backward(loss) model_engine.step() + checkpoint_dir = "output" + model_engine.save_checkpoint(checkpoint_dir) -if __name__ == "__main__": - Path("output").mkdir(exist_ok=True) - results = torchrunx.launch( - func=train, - hostnames=["localhost"], - workers_per_host=1, + return checkpoint_dir + +def main( + launcher: torchrunx.Launcher, + model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], + dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], + deepspeed_args: Annotated[DeepSpeedArgs, tyro.conf.arg(name="deepspeed")] +): + model = AutoModelForCausalLM.from_pretrained(model_config.name) + train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + + # Launch training + results = launcher.run(train, (model, train_dataset, deepspeed_args)) + + # Loading trained model from checkpoint + checkpoint_path = results.rank(0) + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_path) + trained_model = AutoModelForCausalLM.from_config( + AutoConfig.from_pretrained(model_config.name) ) + trained_model.load_state_dict(state_dict) + + +if __name__ == "__main__": + tyro.cli(main) diff --git a/docs/source/examples/scripts/lightning_train.py 
b/docs/source/examples/scripts/lightning_train.py index 8ec4e333..795e90a1 100644 --- a/docs/source/examples/scripts/lightning_train.py +++ b/docs/source/examples/scripts/lightning_train.py @@ -1,54 +1,100 @@ +# /// script +# requires-python = ">=3.12" +# dependencies = [ +# "datasets", +# "lightning", +# "torch", +# "torchrunx", +# "transformers", +# "tyro", +# ] +# /// + import os -from pathlib import Path +import functools +from dataclasses import dataclass +from typing import Annotated import lightning as L import torch from datasets import load_dataset -from torch import nn from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, PreTrainedModel import torchrunx -from torchrunx.integrations.lightning import TorchrunxClusterEnvironment - -class GPT2CausalLMDataset(Dataset): - def __init__(self, text_dataset): - self.dataset = text_dataset - self.tokenizer = AutoTokenizer.from_pretrained("gpt2") - self.tokenizer.pad_token = self.tokenizer.eos_token - self.max_length = 1024 - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - encoded = self.tokenizer( - self.dataset[idx]["text"], - max_length=self.max_length, - truncation=True, - padding="max_length", - return_tensors="pt", - ) - - input_ids = encoded.input_ids.squeeze() - attention_mask = encoded.attention_mask.squeeze() - labels = input_ids.clone() - - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "labels": labels, - } - - -class GPT2LightningWrapper(L.LightningModule): - def __init__(self): +# from torchrunx.integrations.lightning import TorchrunxClusterEnvironment +import tyro + +from lightning.fabric.plugins.environments.torchelastic import ( + TorchElasticEnvironment, +) + + +class TorchrunxClusterEnvironment(TorchElasticEnvironment): + """Compatible ClusterEnvironment for PyTorch Lightning.""" + + @staticmethod + def detect() -> bool: 
+ """Force use of the TorchElasticEnvironment.""" + return True + + +@dataclass +class ModelConfig: + name: str + + +@dataclass +class DatasetConfig: + path: str + name: str | None = None + split: str | None = None + text_column: str = "text" + num_samples: int | None = None + + +def load_training_data( + tokenizer_name: str, + dataset_config: DatasetConfig, +) -> Dataset: + # Load dataset + + dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + if dataset_config.num_samples is not None: + dataset = dataset.select(range(dataset_config.num_samples)) + + # Build tokenizer + + os.environ["TOKENIZERS_PARALLELISM"] = "false" # to suppress warnings + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + tokenize_fn = functools.partial( + tokenizer, + max_length=tokenizer.model_max_length, + truncation=True, + padding="max_length", + ) + + # Tokenize dataset + + return dataset.map( + tokenize_fn, + batched=True, + input_columns=[dataset_config.text_column], + remove_columns=[dataset_config.text_column], + ).map(lambda x: {"labels": x["input_ids"]}) + + + +class CausalLMLightningWrapper(L.LightningModule): + def __init__(self, model): super().__init__() - self.model = AutoModelForCausalLM.from_pretrained("gpt2") + self.model = model def training_step(self, batch, *args): # pyright: ignore - device_batch = {k: v.to(self.model.device) for k, v in batch.items()} + device_batch = {k: torch.stack(v, dim=0).to(self.model.device) for k, v in batch.items()} loss = self.model(**device_batch).loss self.log("train_loss", loss) return loss @@ -58,19 +104,18 @@ def configure_optimizers(self): return optimizer -def train(): - lightning_model = GPT2LightningWrapper() +def train( + model: PreTrainedModel, + train_dataset: Dataset +) -> str: + + lightning_model = CausalLMLightningWrapper(model) - wikitext_train = load_dataset("Salesforce/wikitext", 
"wikitext-2-v1", split="train") - train_dataset = GPT2CausalLMDataset(text_dataset=wikitext_train) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) trainer = L.Trainer( accelerator="gpu", - limit_train_batches=10, max_epochs=1, - devices=2, - num_nodes=1, strategy="ddp", plugins=[TorchrunxClusterEnvironment()], enable_checkpointing=False @@ -83,12 +128,26 @@ def train(): return checkpoint -if __name__ == "__main__": - results = torchrunx.launch( - func=train, - hostnames=["localhost"], - workers_per_host=2, - ) +def main( + launcher: torchrunx.Launcher, + model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], + dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], +): + model = AutoModelForCausalLM.from_pretrained(model_config.name) + train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + # Launch training + results = launcher.run(train, (model, train_dataset)) + + # Loading trained model from checkpoint checkpoint_path = results.rank(0) - print(f"Checkpoint at: {checkpoint_path}") + dummy_model = AutoModelForCausalLM.from_config( + AutoConfig.from_pretrained(model_config.name) + ) + model = CausalLMLightningWrapper(dummy_model) + model.load_state_dict(torch.load(checkpoint_path)["state_dict"]) + trained_model = model.model + + +if __name__ == "__main__": + tyro.cli(main) \ No newline at end of file From 93d1ec12375508fdf5d6b2eda513c12f61bef10d Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Wed, 12 Feb 2025 15:53:05 -0500 Subject: [PATCH 106/141] docs for updated training scripts --- docs/source/examples/accelerate.md | 33 +++++++++++++ docs/source/examples/deepspeed.md | 39 +++++++++++++++ docs/source/examples/lightning.md | 33 +++++++++++++ .../examples/scripts/accelerate_help.txt | 39 +++++++++++++++ .../examples/scripts/accelerate_train.py | 17 +++++-- .../examples/scripts/deepspeed_config.json | 17 ++----- .../examples/scripts/deepspeed_help.txt | 47 
+++++++++++++++++++ .../examples/scripts/deepspeed_train.py | 35 +++++++------- .../examples/scripts/lightning_help.txt | 39 +++++++++++++++ .../examples/scripts/lightning_train.py | 41 ++++++++-------- .../examples/scripts/transformers_train.py | 11 +++-- 11 files changed, 289 insertions(+), 62 deletions(-) create mode 100644 docs/source/examples/scripts/accelerate_help.txt create mode 100644 docs/source/examples/scripts/deepspeed_help.txt create mode 100644 docs/source/examples/scripts/lightning_help.txt diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index 6c7b8e39..edfa5b70 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -1,5 +1,38 @@ # Accelerate +Here's an example script that uses `torchrunx` with [Accelerate](https://huggingface.co/docs/accelerate/en/index) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes. + +[https://torchrun.xyz/accelerate_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/accelerate_train.py) + +
+

python accelerate_train.py --help

(expand)
+ + ```{eval-rst} + .. literalinclude:: ./scripts/accelerate_help.txt + ``` +
+ + - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) + - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) + - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) + +Required: `--model.name`, `--dataset.path` + +### Training GPT-2 on WikiText in One Line + +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) + +```bash +uv run https://torchrun.xyz/accelerate_train.py \ + --model.name gpt2 \ + --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 +``` + +For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. + +### Script + ```{eval-rst} .. literalinclude:: ./scripts/accelerate_train.py + :start-after: # [docs:start-after] ``` diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index 15cf1d7a..b3669618 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -1,6 +1,45 @@ # DeepSpeed +Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deepspeed.ai/) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes. + +[https://torchrun.xyz/deepspeed_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/deepspeed_train.py) + +
+

python deepspeed_train.py --help

(expand)
+ + ```{eval-rst} + .. literalinclude:: ./scripts/deepspeed_help.txt + ``` +
+ + - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) + - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) + - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) + - `--deepspeed`: [`DeepSpeedArgs`](#script) + +Required: `--model.name`, `--dataset.path`, `--deepspeed.deepspeed-config` + +### Training GPT-2 on WikiText in One Line + +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv), and a [DeepSpeed configuration file](#example-configuration-file). + +```bash +uv run https://torchrun.xyz/deepspeed_train.py \ + --model.name gpt2 \ + --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ + --deepspeed.deepspeed-config deepspeed_config.json +``` + +For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. + +### Script + ```{eval-rst} .. literalinclude:: ./scripts/deepspeed_train.py + :start-after: # [docs:start-after] +``` + +### Example configuration file +```{eval-rst} .. literalinclude:: ./scripts/deepspeed_config.json ``` diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index 3c414b94..b3f1171f 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -1,5 +1,38 @@ # PyTorch Lightning +Here's an example script that uses `torchrunx` with [PyTorch Lightning](https://lightning.ai/docs/pytorch/stable/) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes. 
+ +[https://torchrun.xyz/lightning_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/lightning_train.py) + +
+

python lightning_train.py --help

(expand)
+ + ```{eval-rst} + .. literalinclude:: ./scripts/lightning_help.txt + ``` +
+ + - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) + - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) + - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) + +Required: `--model.name`, `--dataset.path` + +### Training GPT-2 on WikiText in One Line + +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) + +```bash +uv run https://torchrun.xyz/lightning_train.py \ + --model.name gpt2 \ + --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 +``` + +For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. + +### Script + ```{eval-rst} .. literalinclude:: ./scripts/lightning_train.py + :start-after: # [docs:start-after] ``` diff --git a/docs/source/examples/scripts/accelerate_help.txt b/docs/source/examples/scripts/accelerate_help.txt new file mode 100644 index 00000000..23be1b36 --- /dev/null +++ b/docs/source/examples/scripts/accelerate_help.txt @@ -0,0 +1,39 @@ +usage: accelerate_train.py [-h] [OPTIONS] + +╭─ options ──────────────────────────────────────────────────────────────────╮ +│ -h, --help show this help message and exit │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ launcher options ─────────────────────────────────────────────────────────╮ +│ Useful for sequential invocations or for specifying arguments via CLI. 
│ +│ ────────────────────────────────────────────────────────────────────────── │ +│ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.ssh-config-file {None}|STR|PATHLIKE │ +│ (default: None) │ +│ --launcher.backend {None,nccl,gloo,mpi,ucc,auto} │ +│ (default: auto) │ +│ --launcher.timeout INT (default: 600) │ +│ --launcher.default-env-vars [STR [STR ...]] │ +│ (default: PATH LD_LIBRARY LIBRARY_PATH 'PYTHON*' │ +│ 'CUDA*' 'TORCH*' 'PYTORCH*' 'NCCL*') │ +│ --launcher.extra-env-vars [STR [STR ...]] │ +│ (default: ) │ +│ --launcher.env-file {None}|STR|PATHLIKE │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ model options ────────────────────────────────────────────────────────────╮ +│ --model.name STR (required) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ dataset options ──────────────────────────────────────────────────────────╮ +│ --dataset.path STR (required) │ +│ --dataset.name {None}|STR │ +│ (default: None) │ +│ --dataset.split {None}|STR │ +│ (default: None) │ +│ --dataset.text-column STR │ +│ (default: text) │ +│ --dataset.num-samples {None}|INT │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/docs/source/examples/scripts/accelerate_train.py b/docs/source/examples/scripts/accelerate_train.py index d4e1ca55..02d7ed8e 100644 --- a/docs/source/examples/scripts/accelerate_train.py +++ b/docs/source/examples/scripts/accelerate_train.py @@ -10,19 +10,21 @@ # ] # /// +# [docs:start-after] import functools import os from dataclasses import dataclass from typing import Annotated import torch +import tyro from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, PreTrainedModel, 
AutoTokenizer +from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel import torchrunx -import tyro + @dataclass class ModelConfig: @@ -37,13 +39,16 @@ class DatasetConfig: text_column: str = "text" num_samples: int | None = None + def load_training_data( tokenizer_name: str, dataset_config: DatasetConfig, ) -> Dataset: # Load dataset - dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + dataset = load_dataset( + dataset_config.path, name=dataset_config.name, split=dataset_config.split + ) if dataset_config.num_samples is not None: dataset = dataset.select(range(dataset_config.num_samples)) @@ -74,7 +79,6 @@ def train( model: PreTrainedModel, train_dataset: Dataset, ) -> str: - accelerator = Accelerator() optimizer = torch.optim.Adam(model.parameters()) @@ -99,6 +103,7 @@ def train( return "output/" + def main( launcher: torchrunx.Launcher, model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], @@ -106,7 +111,9 @@ def main( # training_args: Annotated[TrainingArguments, tyro.conf.arg(name="trainer", help="")], ): model = AutoModelForCausalLM.from_pretrained(model_config.name) - train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + train_dataset = load_training_data( + tokenizer_name=model_config.name, dataset_config=dataset_config + ) # Launch training results = launcher.run(train, (model, train_dataset)) diff --git a/docs/source/examples/scripts/deepspeed_config.json b/docs/source/examples/scripts/deepspeed_config.json index 042f8716..953a5b34 100644 --- a/docs/source/examples/scripts/deepspeed_config.json +++ b/docs/source/examples/scripts/deepspeed_config.json @@ -1,15 +1,6 @@ { - "zero_optimization": { - "stage": 1, - "reduce_bucket_size": 5e8 - }, - "optimizer": { - "type": "AdamW", - "params": { - "betas": [0.9, 0.999], - "eps": 1e-8 - } - }, + "zero_optimization": {"stage": 1, "reduce_bucket_size": 5e8}, + "optimizer": {"type": 
"AdamW", "params": {"betas": [0.9, 0.999], "eps": 1e-8}}, "steps_per_print": 2000, - "train_batch_size": 8 -} \ No newline at end of file + "train_batch_size": 8, +} diff --git a/docs/source/examples/scripts/deepspeed_help.txt b/docs/source/examples/scripts/deepspeed_help.txt new file mode 100644 index 00000000..f558151d --- /dev/null +++ b/docs/source/examples/scripts/deepspeed_help.txt @@ -0,0 +1,47 @@ +[2025-02-12 15:50:18,674] [INFO] [real_accelerator.py:222:get_accelerator] Setting ds_accelerator to cuda (auto detect) +Warning: The cache directory for DeepSpeed Triton autotune, /users/pcurtin1/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path. +usage: deepspeed_train.py [-h] [OPTIONS] + +╭─ options ──────────────────────────────────────────────────────────────────╮ +│ -h, --help show this help message and exit │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ launcher options ─────────────────────────────────────────────────────────╮ +│ Useful for sequential invocations or for specifying arguments via CLI. 
│ +│ ────────────────────────────────────────────────────────────────────────── │ +│ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.ssh-config-file {None}|STR|PATHLIKE │ +│ (default: None) │ +│ --launcher.backend {None,nccl,gloo,mpi,ucc,auto} │ +│ (default: auto) │ +│ --launcher.timeout INT (default: 600) │ +│ --launcher.default-env-vars [STR [STR ...]] │ +│ (default: PATH LD_LIBRARY LIBRARY_PATH 'PYTHON*' │ +│ 'CUDA*' 'TORCH*' 'PYTORCH*' 'NCCL*') │ +│ --launcher.extra-env-vars [STR [STR ...]] │ +│ (default: ) │ +│ --launcher.env-file {None}|STR|PATHLIKE │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ model options ────────────────────────────────────────────────────────────╮ +│ --model.name STR (required) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ dataset options ──────────────────────────────────────────────────────────╮ +│ --dataset.path STR (required) │ +│ --dataset.name {None}|STR │ +│ (default: None) │ +│ --dataset.split {None}|STR │ +│ (default: None) │ +│ --dataset.text-column STR │ +│ (default: text) │ +│ --dataset.num-samples {None}|INT │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ deepspeed options ────────────────────────────────────────────────────────╮ +│ --deepspeed.deepspeed-config STR │ +│ (required) │ +│ --deepspeed.local-rank {None}|INT │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/docs/source/examples/scripts/deepspeed_train.py b/docs/source/examples/scripts/deepspeed_train.py index 45e19dff..240ccc40 100644 --- a/docs/source/examples/scripts/deepspeed_train.py +++ b/docs/source/examples/scripts/deepspeed_train.py @@ -11,22 +11,21 @@ # ] # /// -import argparse +# [docs:start-after] import 
functools import os -from dataclasses import dataclass +from dataclasses import dataclass, InitVar from typing import Annotated import deepspeed -from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint import torch - +import tyro from datasets import load_dataset +from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, PreTrainedModel, AutoTokenizer, AutoConfig +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel import torchrunx -import tyro @dataclass @@ -55,7 +54,9 @@ def load_training_data( ) -> Dataset: # Load dataset - dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + dataset = load_dataset( + dataset_config.path, name=dataset_config.name, split=dataset_config.split + ) if dataset_config.num_samples is not None: dataset = dataset.select(range(dataset_config.num_samples)) @@ -82,19 +83,14 @@ def load_training_data( ).map(lambda x: {"labels": x["input_ids"]}) -def train( - model: PreTrainedModel, - train_dataset: Dataset, - deepspeed_args: DeepSpeedArgs -) -> str: - +def train(model: PreTrainedModel, train_dataset: Dataset, deepspeed_args: DeepSpeedArgs) -> str: deepspeed_args.local_rank = int(os.environ["LOCAL_RANK"]) model_engine, _, loader, _ = deepspeed.initialize( args=deepspeed_args, model=model, model_parameters=model.parameters(), - training_data=train_dataset + training_data=train_dataset, ) model_engine.train() @@ -115,14 +111,17 @@ def train( return checkpoint_dir + def main( launcher: torchrunx.Launcher, model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], - deepspeed_args: Annotated[DeepSpeedArgs, tyro.conf.arg(name="deepspeed")] + deepspeed_args: Annotated[DeepSpeedArgs, tyro.conf.arg(name="deepspeed")], ): model = 
AutoModelForCausalLM.from_pretrained(model_config.name) - train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + train_dataset = load_training_data( + tokenizer_name=model_config.name, dataset_config=dataset_config + ) # Launch training results = launcher.run(train, (model, train_dataset, deepspeed_args)) @@ -130,9 +129,7 @@ def main( # Loading trained model from checkpoint checkpoint_path = results.rank(0) state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_path) - trained_model = AutoModelForCausalLM.from_config( - AutoConfig.from_pretrained(model_config.name) - ) + trained_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(model_config.name)) trained_model.load_state_dict(state_dict) diff --git a/docs/source/examples/scripts/lightning_help.txt b/docs/source/examples/scripts/lightning_help.txt new file mode 100644 index 00000000..54d5ef57 --- /dev/null +++ b/docs/source/examples/scripts/lightning_help.txt @@ -0,0 +1,39 @@ +usage: lightning_train.py [-h] [OPTIONS] + +╭─ options ──────────────────────────────────────────────────────────────────╮ +│ -h, --help show this help message and exit │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ launcher options ─────────────────────────────────────────────────────────╮ +│ Useful for sequential invocations or for specifying arguments via CLI. 
│ +│ ────────────────────────────────────────────────────────────────────────── │ +│ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ (default: auto) │ +│ --launcher.ssh-config-file {None}|STR|PATHLIKE │ +│ (default: None) │ +│ --launcher.backend {None,nccl,gloo,mpi,ucc,auto} │ +│ (default: auto) │ +│ --launcher.timeout INT (default: 600) │ +│ --launcher.default-env-vars [STR [STR ...]] │ +│ (default: PATH LD_LIBRARY LIBRARY_PATH 'PYTHON*' │ +│ 'CUDA*' 'TORCH*' 'PYTORCH*' 'NCCL*') │ +│ --launcher.extra-env-vars [STR [STR ...]] │ +│ (default: ) │ +│ --launcher.env-file {None}|STR|PATHLIKE │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ model options ────────────────────────────────────────────────────────────╮ +│ --model.name STR (required) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ dataset options ──────────────────────────────────────────────────────────╮ +│ --dataset.path STR (required) │ +│ --dataset.name {None}|STR │ +│ (default: None) │ +│ --dataset.split {None}|STR │ +│ (default: None) │ +│ --dataset.text-column STR │ +│ (default: text) │ +│ --dataset.num-samples {None}|INT │ +│ (default: None) │ +╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/docs/source/examples/scripts/lightning_train.py b/docs/source/examples/scripts/lightning_train.py index 795e90a1..cc71d1cc 100644 --- a/docs/source/examples/scripts/lightning_train.py +++ b/docs/source/examples/scripts/lightning_train.py @@ -10,25 +10,25 @@ # ] # /// -import os +# [docs:start-after] import functools +import os from dataclasses import dataclass from typing import Annotated import lightning as L import torch -from datasets import load_dataset - -from torch.utils.data import Dataset -from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, 
PreTrainedModel -import torchrunx # from torchrunx.integrations.lightning import TorchrunxClusterEnvironment import tyro - +from datasets import load_dataset from lightning.fabric.plugins.environments.torchelastic import ( TorchElasticEnvironment, ) +from torch.utils.data import Dataset +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel + +import torchrunx class TorchrunxClusterEnvironment(TorchElasticEnvironment): @@ -60,7 +60,9 @@ def load_training_data( ) -> Dataset: # Load dataset - dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + dataset = load_dataset( + dataset_config.path, name=dataset_config.name, split=dataset_config.split + ) if dataset_config.num_samples is not None: dataset = dataset.select(range(dataset_config.num_samples)) @@ -87,13 +89,12 @@ def load_training_data( ).map(lambda x: {"labels": x["input_ids"]}) - class CausalLMLightningWrapper(L.LightningModule): def __init__(self, model): super().__init__() self.model = model - def training_step(self, batch, *args): # pyright: ignore + def training_step(self, batch, *args): # pyright: ignore device_batch = {k: torch.stack(v, dim=0).to(self.model.device) for k, v in batch.items()} loss = self.model(**device_batch).loss self.log("train_loss", loss) @@ -104,11 +105,7 @@ def configure_optimizers(self): return optimizer -def train( - model: PreTrainedModel, - train_dataset: Dataset -) -> str: - +def train(model: PreTrainedModel, train_dataset: Dataset) -> str: lightning_model = CausalLMLightningWrapper(model) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8) @@ -118,11 +115,11 @@ def train( max_epochs=1, strategy="ddp", plugins=[TorchrunxClusterEnvironment()], - enable_checkpointing=False + enable_checkpointing=False, ) trainer.fit(model=lightning_model, train_dataloaders=train_loader) - checkpoint = f"{trainer.log_dir}/final.ckpt" + checkpoint = f"{trainer.log_dir}/final.ckpt" 
trainer.save_checkpoint(checkpoint) return checkpoint @@ -134,20 +131,20 @@ def main( dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], ): model = AutoModelForCausalLM.from_pretrained(model_config.name) - train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + train_dataset = load_training_data( + tokenizer_name=model_config.name, dataset_config=dataset_config + ) # Launch training results = launcher.run(train, (model, train_dataset)) # Loading trained model from checkpoint checkpoint_path = results.rank(0) - dummy_model = AutoModelForCausalLM.from_config( - AutoConfig.from_pretrained(model_config.name) - ) + dummy_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(model_config.name)) model = CausalLMLightningWrapper(dummy_model) model.load_state_dict(torch.load(checkpoint_path)["state_dict"]) trained_model = model.model if __name__ == "__main__": - tyro.cli(main) \ No newline at end of file + tyro.cli(main) diff --git a/docs/source/examples/scripts/transformers_train.py b/docs/source/examples/scripts/transformers_train.py index 3048ba7a..56eb2f68 100644 --- a/docs/source/examples/scripts/transformers_train.py +++ b/docs/source/examples/scripts/transformers_train.py @@ -15,6 +15,7 @@ from dataclasses import dataclass from typing import Annotated +import tyro from datasets import Dataset, load_dataset from transformers import ( AutoModelForCausalLM, @@ -24,8 +25,8 @@ TrainingArguments, trainer_utils, ) + import torchrunx -import tyro @dataclass @@ -48,7 +49,9 @@ def load_training_data( ) -> Dataset: # Load dataset - dataset = load_dataset(dataset_config.path, name=dataset_config.name, split=dataset_config.split) + dataset = load_dataset( + dataset_config.path, name=dataset_config.name, split=dataset_config.split + ) if dataset_config.num_samples is not None: dataset = dataset.select(range(dataset_config.num_samples)) @@ -92,7 +95,9 @@ def main( training_args: 
Annotated[TrainingArguments, tyro.conf.arg(name="trainer", help="")], ): model = AutoModelForCausalLM.from_pretrained(model_config.name) - train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) + train_dataset = load_training_data( + tokenizer_name=model_config.name, dataset_config=dataset_config + ) # Launch training results = launcher.run(train, (model, train_dataset, training_args)) From b85c1c7a6d7b22031921b0fc3e9b69585f3f329f Mon Sep 17 00:00:00 2001 From: Peter Curtin <98424367+pmcurtin@users.noreply.github.com> Date: Fri, 14 Feb 2025 13:34:51 -0500 Subject: [PATCH 107/141] Update workflows.md --- docs/source/features/workflows.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs/source/features/workflows.md b/docs/source/features/workflows.md index 46e5d032..c6f82cea 100644 --- a/docs/source/features/workflows.md +++ b/docs/source/features/workflows.md @@ -24,3 +24,24 @@ print(f'Accuracy: {accuracy}') ``` {mod}`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. + +## Retries + +Sometimes distributed functions will fail randomly (OOM, networking, or resource errors), and should be executed again. Remember, {mod}`torchrunx.launch` will raise whatever exception its workers raise, so you can catch specific exceptions as you normally would. To retry launching a distributed function, we recommend doing the following: + +```python +import torchrunx as trx + +n_retries = 5 + +for r in range(n_retries + 1): + try: + trx.launch(train, hostnames=...) 
+ except CudaOOMError: + print("retrying") + if r == n_retries: + raise Exception("maximum retries attempted") + else: + break + +``` From 49b905de797ef24e87e6701e0858f523d88769f4 Mon Sep 17 00:00:00 2001 From: Peter Curtin Date: Fri, 14 Feb 2025 14:05:08 -0500 Subject: [PATCH 108/141] tyro cli help strings --- src/torchrunx/launcher.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index adb7dd17..622ca0e4 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -126,10 +126,19 @@ class Launcher: """Alias class for :func:`launch`. Refer to that function for documentation.""" hostnames: list[str] | Literal["auto", "slurm"] = "auto" + """Node hostnames to use in distributed execution. "auto" and "slurm" attempt to detect this + for you based on your environmental variables.""" workers_per_host: int | list[int] | Literal["auto"] = "auto" + """Number of worker processes per node. You can specify a constant number of workers for all + nodes (int), a different number of workers for each node (list[int]), or automatically determine + it per-node ("auto").""" ssh_config_file: str | os.PathLike | None = None + """Path to custom SSH Config for passwordless SSH into each node.""" backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto" + """A torch.distributed backend to use for inter-process communication. "auto" will use NCCL if + GPUs are detected, otherwise GLOO.""" timeout: int = 600 + """The torch.distributed communication timeout of the worker process group, in seconds.""" default_env_vars: tuple[str, ...] = ( "PATH", "LD_LIBRARY", @@ -140,9 +149,15 @@ class Launcher: "PYTORCH*", "NCCL*", ) + """Environmental variables to clone from the launcher process to worker processes, + supporting unix pattern matching.""" extra_env_vars: tuple[str, ...] 
= () + """Additional environmental variables to set in the worker process environments, + formatted identically to the defaul_env_vars field.""" env_file: str | os.PathLike | None = None + """A bash style .env file that will be sourced by worker processes.""" propagate_exceptions: bool = True + """Whether worker exceptions should be raised by the launcher.""" def __post_init__(self) -> None: """Initializing ``handler_factory``. Inclusion in ``__init__`` inhibits CLI generation.""" From e87ea25339ffbfc911003b7eea9b6f06d74872ce Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 14 Feb 2025 17:20:42 -0500 Subject: [PATCH 109/141] ensuring rank-order consistency in launcher-agent all_gather --- src/torchrunx/utils/comm.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/torchrunx/utils/comm.py b/src/torchrunx/utils/comm.py index e4733bfc..7634c9c1 100644 --- a/src/torchrunx/utils/comm.py +++ b/src/torchrunx/utils/comm.py @@ -69,17 +69,18 @@ def _deserialize(self, serialized: bytes) -> Any: return cloudpickle.loads(serialized) def _all_gather(self, obj: Any) -> list: - """Gather object from every rank to list on every rank. + """Gather object from each rank to list (in rank-order). Raises: AgentFailedError: if any agent fails (observed by this communication). 
""" try: - object_bytes = self._serialize(obj) - object_list = [b""] * self.world_size + rank_obj = self._serialize((self.rank, obj)) + rank_obj_list = [b""] * self.world_size # raises RuntimeError if timeout - dist.all_gather_object(object_list=object_list, obj=object_bytes, group=self.group) - return [self._deserialize(o) for o in object_list] + dist.all_gather_object(object_list=rank_obj_list, obj=rank_obj, group=self.group) + rank_obj_list = sorted([self._deserialize(o) for o in rank_obj_list]) + return [obj for _, obj in sorted(rank_obj_list)] except RuntimeError as e: # occurs if launcher or any agent dies and communication times out raise AgentFailedError from e @@ -147,7 +148,7 @@ def from_result(cls, result: RunProcsResult | None) -> Self: for local_rank, failure in result.failures.items(): result.return_values[local_rank] = WorkerFailedError(failure.message) - return_values = list(result.return_values.values()) + return_values = [result.return_values[key] for key in sorted(result.return_values.keys())] failed = any(isinstance(v, (ExceptionFromWorker, WorkerFailedError)) for v in return_values) state = "failed" if failed else "done" From 0c2f131da8840954c9b2461a58315f947b92d22f Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 14 Feb 2025 18:01:18 -0500 Subject: [PATCH 110/141] bump accelerate example --- docs/source/examples/accelerate.md | 10 +++---- .../examples/scripts/accelerate_help.txt | 8 ++++-- .../examples/scripts/accelerate_train.py | 28 ++++++++++--------- .../examples/scripts/deepspeed_train.py | 2 ++ .../examples/scripts/lightning_train.py | 2 ++ .../examples/scripts/transformers_help.txt | 6 ++-- .../examples/scripts/transformers_train.py | 2 ++ docs/source/examples/transformers.md | 2 +- 8 files changed, 37 insertions(+), 23 deletions(-) diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index edfa5b70..7313aada 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ 
-12,24 +12,24 @@ Here's an example script that uses `torchrunx` with [Accelerate](https://hugging ``` + - Custom script arguments: `--batch-size`, `--output-dir` - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) -Required: `--model.name`, `--dataset.path` +Required: `--batch-size`, `--output-dir`, `--model.name`, `--dataset.path` -### Training GPT-2 on WikiText in One Line +--- -The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) +The following command installs dependencies and runs our script (for example, with `GPT-2` on `WikiText`). For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash uv run https://torchrun.xyz/accelerate_train.py \ + --batch-size 8 --output-dir output \ --model.name gpt2 \ --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 ``` -For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. 
- ### Script ```{eval-rst} diff --git a/docs/source/examples/scripts/accelerate_help.txt b/docs/source/examples/scripts/accelerate_help.txt index 23be1b36..f0dd755d 100644 --- a/docs/source/examples/scripts/accelerate_help.txt +++ b/docs/source/examples/scripts/accelerate_help.txt @@ -2,13 +2,15 @@ usage: accelerate_train.py [-h] [OPTIONS] ╭─ options ──────────────────────────────────────────────────────────────────╮ │ -h, --help show this help message and exit │ +│ --batch-size INT (required) │ +│ --output-dir PATH (required) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ launcher options ─────────────────────────────────────────────────────────╮ -│ Useful for sequential invocations or for specifying arguments via CLI. │ +│ Alias class for :func:`launch`. Refer to that function for documentation. │ │ ────────────────────────────────────────────────────────────────────────── │ │ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ │ (default: auto) │ -│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto} │ │ (default: auto) │ │ --launcher.ssh-config-file {None}|STR|PATHLIKE │ │ (default: None) │ @@ -22,6 +24,8 @@ usage: accelerate_train.py [-h] [OPTIONS] │ (default: ) │ │ --launcher.env-file {None}|STR|PATHLIKE │ │ (default: None) │ +│ --launcher.propagate-exceptions, --launcher.no-propagate-exceptions │ +│ (default: True) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ model options ────────────────────────────────────────────────────────────╮ │ --model.name STR (required) │ diff --git a/docs/source/examples/scripts/accelerate_train.py b/docs/source/examples/scripts/accelerate_train.py index 02d7ed8e..40b572db 100644 --- a/docs/source/examples/scripts/accelerate_train.py +++ b/docs/source/examples/scripts/accelerate_train.py @@ -11,9 +11,12 @@ # /// # [docs:start-after] +from __future__ import annotations + import 
functools import os from dataclasses import dataclass +from pathlib import Path from typing import Annotated import torch @@ -78,18 +81,18 @@ def load_training_data( def train( model: PreTrainedModel, train_dataset: Dataset, -) -> str: + batch_size: int, + output_dir: Path, +) -> Path: accelerator = Accelerator() optimizer = torch.optim.Adam(model.parameters()) - train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=8) + train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size) model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader) model.train() for batch_idx, batch in enumerate(train_dataloader): - if batch_idx == 10: - break device_batch = {k: torch.stack(v, dim=0).to(accelerator.device) for k, v in batch.items()} optimizer.zero_grad() @@ -99,28 +102,27 @@ def train( optimizer.step() - accelerator.unwrap_model(model).save_pretrained("output/") - - return "output/" + accelerator.wait_for_everyone() + accelerator.save_state(output_dir=output_dir, safe_serialization=False) + return output_dir / "pytorch_model.bin" def main( launcher: torchrunx.Launcher, model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], - # training_args: Annotated[TrainingArguments, tyro.conf.arg(name="trainer", help="")], + batch_size: int, + output_dir: Path, ): model = AutoModelForCausalLM.from_pretrained(model_config.name) - train_dataset = load_training_data( - tokenizer_name=model_config.name, dataset_config=dataset_config - ) + train_dataset = load_training_data(tokenizer_name=model_config.name, dataset_config=dataset_config) # Launch training - results = launcher.run(train, (model, train_dataset)) + results = launcher.run(train, (model, train_dataset, batch_size, output_dir)) # Loading trained model from checkpoint checkpoint_path = results.rank(0) - trained_model = AutoModelForCausalLM.from_pretrained(checkpoint_path) + 
trained_model = AutoModelForCausalLM.from_pretrained(model_config.name, state_dict=torch.load(checkpoint_path)) if __name__ == "__main__": diff --git a/docs/source/examples/scripts/deepspeed_train.py b/docs/source/examples/scripts/deepspeed_train.py index 240ccc40..edd5427d 100644 --- a/docs/source/examples/scripts/deepspeed_train.py +++ b/docs/source/examples/scripts/deepspeed_train.py @@ -12,6 +12,8 @@ # /// # [docs:start-after] +from __future__ import annotations + import functools import os from dataclasses import dataclass, InitVar diff --git a/docs/source/examples/scripts/lightning_train.py b/docs/source/examples/scripts/lightning_train.py index cc71d1cc..bebf4cc4 100644 --- a/docs/source/examples/scripts/lightning_train.py +++ b/docs/source/examples/scripts/lightning_train.py @@ -11,6 +11,8 @@ # /// # [docs:start-after] +from __future__ import annotations + import functools import os from dataclasses import dataclass diff --git a/docs/source/examples/scripts/transformers_help.txt b/docs/source/examples/scripts/transformers_help.txt index 3678bc59..dedeba6f 100644 --- a/docs/source/examples/scripts/transformers_help.txt +++ b/docs/source/examples/scripts/transformers_help.txt @@ -5,11 +5,11 @@ usage: transformers_train.py [-h] [OPTIONS] │ show this help message and exit │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ launcher options ─────────────────────────────────────────────────────────╮ -│ Useful for sequential invocations or for specifying arguments via CLI. │ +│ Alias class for :func:`launch`. Refer to that function for documentation. 
│ │ ────────────────────────────────────────────────────────────────────────── │ │ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ │ (default: auto) │ -│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto} │ │ (default: auto) │ │ --launcher.ssh-config-file {None}|STR|PATHLIKE │ │ (default: None) │ @@ -24,6 +24,8 @@ usage: transformers_train.py [-h] [OPTIONS] │ (default: ) │ │ --launcher.env-file {None}|STR|PATHLIKE │ │ (default: None) │ +│ --launcher.propagate-exceptions, --launcher.no-propagate-exceptions │ +│ (default: True) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ model options ────────────────────────────────────────────────────────────╮ │ --model.name STR │ diff --git a/docs/source/examples/scripts/transformers_train.py b/docs/source/examples/scripts/transformers_train.py index 56eb2f68..5b313697 100644 --- a/docs/source/examples/scripts/transformers_train.py +++ b/docs/source/examples/scripts/transformers_train.py @@ -10,6 +10,8 @@ # /// # [docs:start-after] +from __future__ import annotations + import functools import os from dataclasses import dataclass diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index be3bc65f..705aedd1 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -27,7 +27,7 @@ The following command runs our script end-to-end: installing all dependencies, d uv run https://torchrun.xyz/transformers_train.py \ --model.name gpt2 \ --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ - --trainer.output_dir output --trainer.per-device-train-batch-size 4 --trainer.report-to tensorboard + --trainer.output-dir output --trainer.per-device-train-batch-size 4 --trainer.report-to tensorboard ``` For multi-node training (+ if not using SLURM), you should also pass e.g. 
`--launcher.hostnames node1 node2`. From 4fdf9ef1ef0cda5f9a3033342038c0993c78dc12 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 00:23:30 -0500 Subject: [PATCH 111/141] update example script docs --- README.md | 4 +- docs/source/examples/accelerate.md | 13 +-- docs/source/examples/composer.md | 1 - docs/source/examples/deepspeed.md | 74 ++++++++++++----- docs/source/examples/lightning.md | 16 ++-- .../examples/scripts/accelerate_help.txt | 6 +- .../examples/scripts/accelerate_train.py | 2 +- .../examples/scripts/deepspeed_config.json | 6 -- .../examples/scripts/deepspeed_help.txt | 37 ++++----- .../examples/scripts/deepspeed_train.py | 80 ++++++------------- .../examples/scripts/generate_help_menus.sh | 4 + .../examples/scripts/lightning_train.py | 26 ++---- .../examples/scripts/transformers_help.txt | 6 +- .../examples/scripts/transformers_train.py | 2 +- docs/source/examples/transformers.md | 21 +++-- docs/source/index.rst | 3 +- src/torchrunx/utils/logging.py | 2 +- 17 files changed, 134 insertions(+), 169 deletions(-) delete mode 100644 docs/source/examples/composer.md delete mode 100644 docs/source/examples/scripts/deepspeed_config.json create mode 100644 docs/source/examples/scripts/generate_help_menus.sh diff --git a/README.md b/README.md index 27e373b5..722f0edb 100644 --- a/README.md +++ b/README.md @@ -61,10 +61,11 @@ import torchrunx results = torchrunx.launch( func = train, - func_kwargs = dict( + kwargs = dict( model = nn.Linear(10, 10), num_steps = 10 ), + # hostnames = ["localhost", "second_machine"], workers_per_host = 2 ) @@ -78,7 +79,6 @@ torch.save(trained_model.state_dict(), "output/model.pth") - [HF Transformers](https://torchrun.xyz/examples/transformers.html) - [DeepSpeed](https://torchrun.xyz/examples/deepspeed.html) - [PyTorch Lightning](https://torchrun.xyz/examples/lightning.html) - - [MosaicML Composer](https://torchrun.xyz/examples/composer.html) **Refer to our [API](https://torchrun.xyz/api.html) and [Advanced Usage 
Guide](https://torchrun.xyz/advanced.html) for many more capabilities!** diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index 7313aada..a6f7645f 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -12,25 +12,18 @@ Here's an example script that uses `torchrunx` with [Accelerate](https://hugging ``` - - Custom script arguments: `--batch-size`, `--output-dir` - - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) - - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) - - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) - -Required: `--batch-size`, `--output-dir`, `--model.name`, `--dataset.path` - ---- +## Training GPT-2 on WikiText in One Line The following command installs dependencies and runs our script (for example, with `GPT-2` on `WikiText`). For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash -uv run https://torchrun.xyz/accelerate_train.py \ +uv run --python "3.12" https://torchrun.xyz/accelerate_train.py \ --batch-size 8 --output-dir output \ --model.name gpt2 \ --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 ``` -### Script +## Script ```{eval-rst} .. 
literalinclude:: ./scripts/accelerate_train.py diff --git a/docs/source/examples/composer.md b/docs/source/examples/composer.md deleted file mode 100644 index baba39a5..00000000 --- a/docs/source/examples/composer.md +++ /dev/null @@ -1 +0,0 @@ -# MosaicML Composer diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index b3669618..4895ff05 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -1,45 +1,81 @@ # DeepSpeed -Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deepspeed.ai/) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes. +Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deepspeed.ai) to fine-tune any causal language model (from `transformers`) on any text dataset (from `datasets`) with any number of GPUs or nodes. [https://torchrun.xyz/deepspeed_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/deepspeed_train.py)
-

python accelerate_train.py --help

(expand)
+

python deepspeed_train.py --help

(expand)
```{eval-rst} .. literalinclude:: ./scripts/deepspeed_help.txt ```
- - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) - - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) - - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) - - `--deepspeed`: [`DeepSpeedArgs`](#script) +## Training GPT-2 on WikiText -Required: `--model.name`, `--dataset.path`, `--deepspeed.deepspeed-config` +Deepspeed requires additional (non-Python) dependencies. Use the following commands to set up a project. Source: [Apoorv's Blog — Managing Project Dependencies](https://blog.apoorvkh.com/posts/project-dependencies.html) -### Training GPT-2 on WikiText in One Line +```bash +# Install pixi +curl -fsSL https://pixi.sh/install.sh | bash + +# Create a project +pixi init my-project --format pyproject +cd my-project -The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv), and a [DeepSpeed configuration file](#example-configuration-file). 
+# Install dependencies +pixi project channel add "conda-forge" "nvidia/label/cuda-12.4.0" +pixi add "python=3.12.7" "cuda=12.4.0" "gcc=11.4.0" "gxx=11.4.0" +pixi add --pypi "torch==2.5.1" "deepspeed" "datasets" "tensorboard" "torch" "torchrunx" "transformers" "tyro" + +cat < .env +export PYTHONNOUSERSITE="1" +export LIBRARY_PATH="\$CONDA_PREFIX/lib" +export LD_LIBRARY_PATH="\$CONDA_PREFIX/lib" +export CUDA_HOME="\$CONDA_PREFIX" +EOF + +# Activate environment +pixi shell +source .env +``` + +Download [deepspeed_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/deepspeed_train.py) and create `deepspeed_config.json` with: + +```json +{ + "train_batch_size": 8, + "gradient_accumulation_steps": 1, + "optimizer": { + "type": "Adam", + "params": { "lr": 0.00015 } + }, + "fp16": { "enabled": true }, + "zero_optimization": true, + "tensorboard": { + "enabled": true, + "output_path": "output/tensorboard/", + "job_name": "gpt2_wikitext" + } +} +``` ```bash -uv run https://torchrun.xyz/deepspeed_train.py \ - --model.name gpt2 \ - --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ - --deepspeed.deepspeed-config deepspeed_config.json +python deepspeed_train.py --model-name gpt2 --deepspeed-config deepspeed_config.json --checkpoint-dir output \ + --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 ``` For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. -### Script +You can visualize the logs with: -```{eval-rst} -.. literalinclude:: ./scripts/deepspeed_train.py - :start-after: # [docs:start-after] +```bash +tensorboard --logdir output/tensorboard/gpt2_wikitext ``` -### Example configuration file +## Script + ```{eval-rst} -.. literalinclude:: ./scripts/deepspeed_config.json +.. 
literalinclude:: ./scripts/deepspeed_train.py ``` diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index b3f1171f..f33670b5 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -5,32 +5,26 @@ Here's an example script that uses `torchrunx` with [PyTorch Lightning](https:// [https://torchrun.xyz/lightning_train.py](https://raw.githubusercontent.com/apoorvkh/torchrunx/refs/heads/main/docs/source/examples/scripts/lightning_train.py)
-

python accelerate_train.py --help

(expand)
+

python lightning_train.py --help

(expand)
```{eval-rst} .. literalinclude:: ./scripts/lightning_help.txt ```
- - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) - - `--model`: [`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) - - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) +## Training GPT-2 on WikiText in One Line -Required: `--model.name`, `--dataset.path` - -### Training GPT-2 on WikiText in One Line - -The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash -uv run https://torchrun.xyz/lightning_train.py \ +uv run --python "3.12" https://torchrun.xyz/lightning_train.py \ --model.name gpt2 \ --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 ``` For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. -### Script +## Script ```{eval-rst} .. literalinclude:: ./scripts/lightning_train.py diff --git a/docs/source/examples/scripts/accelerate_help.txt b/docs/source/examples/scripts/accelerate_help.txt index f0dd755d..31e3b0b1 100644 --- a/docs/source/examples/scripts/accelerate_help.txt +++ b/docs/source/examples/scripts/accelerate_help.txt @@ -6,11 +6,11 @@ usage: accelerate_train.py [-h] [OPTIONS] │ --output-dir PATH (required) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ launcher options ─────────────────────────────────────────────────────────╮ -│ Alias class for :func:`launch`. Refer to that function for documentation. │ +│ Useful for sequential invocations or for specifying arguments via CLI. 
│ │ ────────────────────────────────────────────────────────────────────────── │ │ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ │ (default: auto) │ -│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto} │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ │ (default: auto) │ │ --launcher.ssh-config-file {None}|STR|PATHLIKE │ │ (default: None) │ @@ -24,8 +24,6 @@ usage: accelerate_train.py [-h] [OPTIONS] │ (default: ) │ │ --launcher.env-file {None}|STR|PATHLIKE │ │ (default: None) │ -│ --launcher.propagate-exceptions, --launcher.no-propagate-exceptions │ -│ (default: True) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ model options ────────────────────────────────────────────────────────────╮ │ --model.name STR (required) │ diff --git a/docs/source/examples/scripts/accelerate_train.py b/docs/source/examples/scripts/accelerate_train.py index 40b572db..dd862f29 100644 --- a/docs/source/examples/scripts/accelerate_train.py +++ b/docs/source/examples/scripts/accelerate_train.py @@ -1,5 +1,5 @@ # /// script -# requires-python = ">=3.12" +# requires-python = ">=3.9" # dependencies = [ # "accelerate", # "datasets", diff --git a/docs/source/examples/scripts/deepspeed_config.json b/docs/source/examples/scripts/deepspeed_config.json deleted file mode 100644 index 953a5b34..00000000 --- a/docs/source/examples/scripts/deepspeed_config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "zero_optimization": {"stage": 1, "reduce_bucket_size": 5e8}, - "optimizer": {"type": "AdamW", "params": {"betas": [0.9, 0.999], "eps": 1e-8}}, - "steps_per_print": 2000, - "train_batch_size": 8, -} diff --git a/docs/source/examples/scripts/deepspeed_help.txt b/docs/source/examples/scripts/deepspeed_help.txt index f558151d..ad4416af 100644 --- a/docs/source/examples/scripts/deepspeed_help.txt +++ b/docs/source/examples/scripts/deepspeed_help.txt @@ -1,9 +1,22 @@ -[2025-02-12 15:50:18,674] [INFO] [real_accelerator.py:222:get_accelerator] 
Setting ds_accelerator to cuda (auto detect) -Warning: The cache directory for DeepSpeed Triton autotune, /users/pcurtin1/.triton/autotune, appears to be on an NFS system. While this is generally acceptable, if you experience slowdowns or hanging when DeepSpeed exits, it is recommended to set the TRITON_CACHE_DIR environment variable to a non-NFS path. usage: deepspeed_train.py [-h] [OPTIONS] ╭─ options ──────────────────────────────────────────────────────────────────╮ │ -h, --help show this help message and exit │ +│ --model-name STR (required) │ +│ --deepspeed-config PATH │ +│ (required) │ +│ --checkpoint-dir PATH (required) │ +╰────────────────────────────────────────────────────────────────────────────╯ +╭─ dataset options ──────────────────────────────────────────────────────────╮ +│ --dataset.path STR (required) │ +│ --dataset.name {None}|STR │ +│ (default: None) │ +│ --dataset.split {None}|STR │ +│ (default: None) │ +│ --dataset.text-column STR │ +│ (default: text) │ +│ --dataset.num-samples {None}|INT │ +│ (default: None) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ launcher options ─────────────────────────────────────────────────────────╮ │ Useful for sequential invocations or for specifying arguments via CLI. 
│ @@ -25,23 +38,3 @@ usage: deepspeed_train.py [-h] [OPTIONS] │ --launcher.env-file {None}|STR|PATHLIKE │ │ (default: None) │ ╰────────────────────────────────────────────────────────────────────────────╯ -╭─ model options ────────────────────────────────────────────────────────────╮ -│ --model.name STR (required) │ -╰────────────────────────────────────────────────────────────────────────────╯ -╭─ dataset options ──────────────────────────────────────────────────────────╮ -│ --dataset.path STR (required) │ -│ --dataset.name {None}|STR │ -│ (default: None) │ -│ --dataset.split {None}|STR │ -│ (default: None) │ -│ --dataset.text-column STR │ -│ (default: text) │ -│ --dataset.num-samples {None}|INT │ -│ (default: None) │ -╰────────────────────────────────────────────────────────────────────────────╯ -╭─ deepspeed options ────────────────────────────────────────────────────────╮ -│ --deepspeed.deepspeed-config STR │ -│ (required) │ -│ --deepspeed.local-rank {None}|INT │ -│ (default: None) │ -╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/docs/source/examples/scripts/deepspeed_train.py b/docs/source/examples/scripts/deepspeed_train.py index edd5427d..168fee17 100644 --- a/docs/source/examples/scripts/deepspeed_train.py +++ b/docs/source/examples/scripts/deepspeed_train.py @@ -1,22 +1,9 @@ -# /// script -# requires-python = ">=3.12" -# dependencies = [ -# "deepspeed", -# "datasets", -# "tensorboard", -# "torch", -# "torchrunx", -# "transformers", -# "tyro", -# ] -# /// - -# [docs:start-after] from __future__ import annotations import functools import os -from dataclasses import dataclass, InitVar +from dataclasses import dataclass +from pathlib import Path from typing import Annotated import deepspeed @@ -25,16 +12,11 @@ from datasets import load_dataset from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint from torch.utils.data import Dataset -from transformers import AutoConfig, 
AutoModelForCausalLM, AutoTokenizer, PreTrainedModel +from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel import torchrunx -@dataclass -class ModelConfig: - name: str - - @dataclass class DatasetConfig: path: str @@ -44,12 +26,6 @@ class DatasetConfig: num_samples: int | None = None -@dataclass -class DeepSpeedArgs: - deepspeed_config: str - local_rank: int | None = None - - def load_training_data( tokenizer_name: str, dataset_config: DatasetConfig, @@ -85,53 +61,47 @@ def load_training_data( ).map(lambda x: {"labels": x["input_ids"]}) -def train(model: PreTrainedModel, train_dataset: Dataset, deepspeed_args: DeepSpeedArgs) -> str: - deepspeed_args.local_rank = int(os.environ["LOCAL_RANK"]) - - model_engine, _, loader, _ = deepspeed.initialize( - args=deepspeed_args, +def train( + model: PreTrainedModel, + train_dataset: Dataset, + deepspeed_config: str | dict, + checkpoint_dir: str, +) -> None: + model_engine, _, data_loader, _ = deepspeed.initialize( model=model, model_parameters=model.parameters(), training_data=train_dataset, + config=deepspeed_config, ) model_engine.train() - for batch_idx, batch in enumerate(loader): - if batch_idx == 10: - break - device_batch = {k: torch.stack(v, dim=0).to(model_engine.device) for k, v in batch.items()} - model_engine.zero_grad() - - loss = model_engine(**device_batch).loss - print(f"Step {batch_idx}, loss: {loss.item()}", flush=True, end="") - model_engine.backward(loss) + for step, batch in enumerate(data_loader): + input_batch = {k: torch.stack(v).T.to(model_engine.device) for k, v in batch.items()} + loss = model_engine(**input_batch).loss + model_engine.backward(loss) model_engine.step() + print(f"Step {step}, loss: {loss.item()}", flush=True, end="") - checkpoint_dir = "output" model_engine.save_checkpoint(checkpoint_dir) - return checkpoint_dir - def main( - launcher: torchrunx.Launcher, - model_config: Annotated[ModelConfig, tyro.conf.arg(name="model")], + model_name: str, + 
deepspeed_config: Path, + checkpoint_dir: Path, dataset_config: Annotated[DatasetConfig, tyro.conf.arg(name="dataset")], - deepspeed_args: Annotated[DeepSpeedArgs, tyro.conf.arg(name="deepspeed")], + launcher: torchrunx.Launcher, ): - model = AutoModelForCausalLM.from_pretrained(model_config.name) - train_dataset = load_training_data( - tokenizer_name=model_config.name, dataset_config=dataset_config - ) + model = AutoModelForCausalLM.from_pretrained(model_name) + train_dataset = load_training_data(tokenizer_name=model_name, dataset_config=dataset_config) # Launch training - results = launcher.run(train, (model, train_dataset, deepspeed_args)) + launcher.run(train, (model, train_dataset, str(deepspeed_config), str(checkpoint_dir))) # Loading trained model from checkpoint - checkpoint_path = results.rank(0) - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_path) - trained_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(model_config.name)) + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) + trained_model = AutoModelForCausalLM.from_pretrained(model_name) trained_model.load_state_dict(state_dict) diff --git a/docs/source/examples/scripts/generate_help_menus.sh b/docs/source/examples/scripts/generate_help_menus.sh new file mode 100644 index 00000000..db639a16 --- /dev/null +++ b/docs/source/examples/scripts/generate_help_menus.sh @@ -0,0 +1,4 @@ +uv run docs/source/examples/scripts/transformers_train.py --help > docs/source/examples/scripts/transformers_help.txt +uv run docs/source/examples/scripts/deepspeed_train.py --help > docs/source/examples/scripts/deepspeed_help.txt +uv run docs/source/examples/scripts/lightning_train.py --help > docs/source/examples/scripts/lightning_help.txt +uv run docs/source/examples/scripts/accelerate_train.py --help > docs/source/examples/scripts/accelerate_help.txt diff --git a/docs/source/examples/scripts/lightning_train.py b/docs/source/examples/scripts/lightning_train.py index 
bebf4cc4..1684eb63 100644 --- a/docs/source/examples/scripts/lightning_train.py +++ b/docs/source/examples/scripts/lightning_train.py @@ -1,5 +1,5 @@ # /// script -# requires-python = ">=3.12" +# requires-python = ">=3.9" # dependencies = [ # "datasets", # "lightning", @@ -21,25 +21,13 @@ import lightning as L import torch -# from torchrunx.integrations.lightning import TorchrunxClusterEnvironment import tyro from datasets import load_dataset -from lightning.fabric.plugins.environments.torchelastic import ( - TorchElasticEnvironment, -) from torch.utils.data import Dataset -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel +from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel import torchrunx - - -class TorchrunxClusterEnvironment(TorchElasticEnvironment): - """Compatible ClusterEnvironment for PyTorch Lightning.""" - - @staticmethod - def detect() -> bool: - """Force use of the TorchElasticEnvironment.""" - return True +from torchrunx.integrations.lightning import TorchrunxClusterEnvironment @dataclass @@ -142,10 +130,10 @@ def main( # Loading trained model from checkpoint checkpoint_path = results.rank(0) - dummy_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(model_config.name)) - model = CausalLMLightningWrapper(dummy_model) - model.load_state_dict(torch.load(checkpoint_path)["state_dict"]) - trained_model = model.model + dummy_model = AutoModelForCausalLM.from_pretrained(model_config.name) + trained_model = CausalLMLightningWrapper(dummy_model) + trained_model.load_state_dict(torch.load(checkpoint_path)["state_dict"]) + trained_model = trained_model.model if __name__ == "__main__": diff --git a/docs/source/examples/scripts/transformers_help.txt b/docs/source/examples/scripts/transformers_help.txt index dedeba6f..3678bc59 100644 --- a/docs/source/examples/scripts/transformers_help.txt +++ b/docs/source/examples/scripts/transformers_help.txt @@ -5,11 +5,11 @@ usage: 
transformers_train.py [-h] [OPTIONS] │ show this help message and exit │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ launcher options ─────────────────────────────────────────────────────────╮ -│ Alias class for :func:`launch`. Refer to that function for documentation. │ +│ Useful for sequential invocations or for specifying arguments via CLI. │ │ ────────────────────────────────────────────────────────────────────────── │ │ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ │ (default: auto) │ -│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto} │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ │ (default: auto) │ │ --launcher.ssh-config-file {None}|STR|PATHLIKE │ │ (default: None) │ @@ -24,8 +24,6 @@ usage: transformers_train.py [-h] [OPTIONS] │ (default: ) │ │ --launcher.env-file {None}|STR|PATHLIKE │ │ (default: None) │ -│ --launcher.propagate-exceptions, --launcher.no-propagate-exceptions │ -│ (default: True) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ model options ────────────────────────────────────────────────────────────╮ │ --model.name STR │ diff --git a/docs/source/examples/scripts/transformers_train.py b/docs/source/examples/scripts/transformers_train.py index 5b313697..f93eca65 100644 --- a/docs/source/examples/scripts/transformers_train.py +++ b/docs/source/examples/scripts/transformers_train.py @@ -1,5 +1,5 @@ # /// script -# requires-python = ">=3.12" +# requires-python = ">=3.9" # dependencies = [ # "datasets", # "tensorboard", diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 705aedd1..2e0fb9c0 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -12,27 +12,26 @@ Here's an example script that uses `torchrunx` with [`transformers.Trainer`](htt ``` - - `--launcher`: [torchrunx.Launcher](../api.md#torchrunx.Launcher) - - `--model`: 
[`transformers.AutoModelForCausalLM`](https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM) - - `--dataset`: [`datasets.load_dataset`](https://huggingface.co/docs/datasets/en/package_reference/loading_methods#datasets.load_dataset) - - `--trainer`: [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.TrainingArguments) +## Training GPT-2 on WikiText in One Line -Required: `--model.name`, `--dataset.path`, `--trainer.output-dir` +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. -### Training GPT-2 on WikiText in One Line - -The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, logging to TensorBoard, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) +Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash -uv run https://torchrun.xyz/transformers_train.py \ +uv run --python "3.12" https://torchrun.xyz/transformers_train.py \ --model.name gpt2 \ --dataset.path "Salesforce/wikitext" --dataset.name "wikitext-2-v1" --dataset.split "train" --dataset.num-samples 80 \ --trainer.output-dir output --trainer.per-device-train-batch-size 4 --trainer.report-to tensorboard ``` -For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. +You can visualize the logs with: + +```bash +uv run --with tensorboard tensorboard --logdir output/runs +``` -### Script +## Script ```{eval-rst} .. 
literalinclude:: ./scripts/transformers_train.py diff --git a/docs/source/index.rst b/docs/source/index.rst index cbeb9236..3097ce23 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -22,10 +22,9 @@ :hidden: ./examples/transformers.md - ./examples/accelerate.md ./examples/deepspeed.md ./examples/lightning.md - ./examples/composer.md + ./examples/accelerate.md .. sidebar-links:: :github: diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index 64c20e18..ca7b2ded 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -1,4 +1,4 @@ -"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" # noqa: A005 +"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" from __future__ import annotations From 7fad25f71c5db4051673dd03c0b82c50a81812dc Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 00:34:07 -0500 Subject: [PATCH 112/141] edit docs --- docs/source/examples/accelerate.md | 4 +++- docs/source/examples/deepspeed.md | 6 ++---- docs/source/examples/lightning.md | 4 +++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index a6f7645f..79ebf8eb 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -14,7 +14,9 @@ Here's an example script that uses `torchrunx` with [Accelerate](https://hugging ## Training GPT-2 on WikiText in One Line -The following command installs dependencies and runs our script (for example, with `GPT-2` on `WikiText`). For multi-node training (+ if not using SLURM), you should also pass e.g. `--launcher.hostnames node1 node2`. Pre-requisite: [uv](https://docs.astral.sh/uv) +The following command installs dependencies and runs our script (for example, with `GPT-2` on `WikiText`). For multi-node training (+ if not using SLURM), you should also pass e.g. 
`--launcher.hostnames node1 node2`. + +Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash uv run --python "3.12" https://torchrun.xyz/accelerate_train.py \ diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index 4895ff05..207af460 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -16,11 +16,9 @@ Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deep Deepspeed requires additional (non-Python) dependencies. Use the following commands to set up a project. Source: [Apoorv's Blog — Managing Project Dependencies](https://blog.apoorvkh.com/posts/project-dependencies.html) -```bash -# Install pixi -curl -fsSL https://pixi.sh/install.sh | bash +Pre-requisite: [pixi](https://pixi.sh) -# Create a project +```bash pixi init my-project --format pyproject cd my-project diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index f33670b5..21599da7 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -14,7 +14,9 @@ Here's an example script that uses `torchrunx` with [PyTorch Lightning](https:// ## Training GPT-2 on WikiText in One Line -The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, etc. Pre-requisite: [uv](https://docs.astral.sh/uv) +The following command runs our script end-to-end: installing all dependencies, downloading model and data, training, etc. 
+ +Pre-requisite: [uv](https://docs.astral.sh/uv) ```bash uv run --python "3.12" https://torchrun.xyz/lightning_train.py \ From 42fa349b354019b3b6121e8bb0d07aa91590f61c Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 00:46:29 -0500 Subject: [PATCH 113/141] no milliseconds in logging --- src/torchrunx/utils/logging.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index ca7b2ded..643c9681 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -1,4 +1,4 @@ -"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" +"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" # noqa: A005 from __future__ import annotations @@ -75,6 +75,7 @@ def stream_handler( "%(asctime)s:%(levelname)s:%(hostname)s[%(local_rank)s]: %(message)s" if local_rank is not None else "%(asctime)s:%(levelname)s:%(hostname)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", ), ) return handler @@ -89,8 +90,9 @@ def file_handler( """Handler builder function for writing logs from specified hostname/rank to a file.""" handler = logging.FileHandler(file_path) add_filter_to_handler(handler, hostname, local_rank, log_level=log_level) - formatter = logging.Formatter("%(asctime)s:%(levelname)s: %(message)s") - handler.setFormatter(formatter) + handler.setFormatter( + logging.Formatter("%(asctime)s:%(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") + ) return handler From 23bd11b2c21205f30651dcbe7a8d12b2ec2b8c5c Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 14:22:11 -0500 Subject: [PATCH 114/141] added type checking based on function arguments/returns; removed launch(), which is not compatible --- src/torchrunx/launcher.py | 310 +++++++---------------- src/torchrunx/utils/comm.py | 49 +++-- src/torchrunx/utils/environment.py | 143 ++++++++++++- src/torchrunx/utils/logging.py | 
45 ++--- 4 files changed, 264 insertions(+), 283 deletions(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 622ca0e4..930b5922 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -2,123 +2,79 @@ from __future__ import annotations -__all__ = ["LaunchResult", "Launcher", "launch"] +__all__ = ["LaunchResult", "Launcher"] -import fnmatch -import ipaddress import itertools -import logging -import os -import shlex import socket -import subprocess -import sys from dataclasses import dataclass from functools import partial -from logging import Handler from multiprocessing import Event, Process -from pathlib import Path -from typing import Any, Callable, Literal +from typing import TYPE_CHECKING, Callable, Generic, Literal, TypeVar -import fabric import torch.distributed as dist -from typing_extensions import Self +from typing_extensions import ParamSpec, Self from .utils.comm import ( LauncherAgentGroup, LauncherPayload, get_open_port, ) -from .utils.environment import auto_hosts, slurm_hosts -from .utils.errors import ( - ExceptionFromWorker, - WorkerFailedError, +from .utils.environment import ( + build_launch_command, + execute_command, + resolve_hostnames, + resolve_workers_per_host, ) +from .utils.errors import ExceptionFromWorker, WorkerFailedError from .utils.logging import LoggingServerArgs, start_logging_server - -def launch( - func: Callable, - args: tuple | None = None, - kwargs: dict[str, Any] | None = None, - *, - hostnames: list[str] | Literal["auto", "slurm"] = "auto", - workers_per_host: int | list[int] | Literal["auto"] = "auto", - ssh_config_file: str | os.PathLike | None = None, - backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto", - timeout: int = 600, - default_env_vars: tuple[str, ...] = ( - "PATH", - "LD_LIBRARY", - "LIBRARY_PATH", - "PYTHON*", - "CUDA*", - "TORCH*", - "PYTORCH*", - "NCCL*", - ), - extra_env_vars: tuple[str, ...] 
= (), - env_file: str | os.PathLike | None = None, - propagate_exceptions: bool = True, - handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto", -) -> LaunchResult: - """Distribute and parallelize a function onto specified nodes and workers. - - Arguments: - func: Function to replicate on each node/worker. - args: Positional arguments for ``func``. Default: :py:obj:`None`. - kwargs: Keyword arguments for ``func``. Default: :py:obj:`None`. - hostnames: Nodes on which to launch the function. - Default: ``"auto"`` (infer from localhost or SLURM). - workers_per_host: Number of processes to run (e.g. # of GPUs) per node. - Default: ``"auto"`` (number of GPUs per host). - ssh_config_file: Path to an SSH configuration file for connecting to nodes. - Default: ``"~/.ssh/config"`` or ``"/etc/ssh/ssh_config"``. - backend: `Backend `_ - for worker process group. Set `None` to disable. - Default: ``"auto"`` (NCCL if GPU or GLOO if CPU). - timeout: Worker process group timeout (seconds). - Default: ``600``. - default_env_vars: Environment variables to copy from the launcher process to workers. - Supports bash pattern matching syntax. - Default: ``("PATH", "LD_LIBRARY", "LIBRARY_PATH", "PYTHON*", "CUDA*", "TORCH*", - "PYTORCH*", "NCCL*")``. - extra_env_vars: Additional user-specified environment variables to copy. - Default: ``()``. - env_file: Path to a file (e.g., ``.env``) with additional environment variables to copy. - Default: :py:obj:`None`. - propagate_exceptions: Raise exceptions from worker processes in the launcher. - If false, raises :exc:`WorkerFailedError` instead. - Default: :py:obj:`True`. - handler_factory: Function to customize processing of agent and worker logs with handlers. - Default: ``"auto"`` (see `custom logging `_). - - Raises: - RuntimeError: If there are configuration issues. - Exception: Any exception raised in a worker process is propagated. - WorkerFailedError: If a worker fails (e.g. 
from a segmentation fault) - or raises an exception and ``propagate_exceptions=False``. - AgentFailedError: If an agent fails, e.g. from an OS signal. - """ - return ( - Launcher( - hostnames=hostnames, - workers_per_host=workers_per_host, - ssh_config_file=ssh_config_file, - backend=backend, - timeout=timeout, - default_env_vars=default_env_vars, - extra_env_vars=extra_env_vars, - env_file=env_file, - propagate_exceptions=propagate_exceptions, - ) - .set_handler_factory(handler_factory) - .run( - func, - args, - kwargs, - ) - ) +if TYPE_CHECKING: + import os + from logging import Handler + + +"""Distribute and parallelize a function onto specified nodes and workers. + +Arguments: + func: Function to replicate on each node/worker. + args: Positional arguments for ``func``. Default: :py:obj:`None`. + kwargs: Keyword arguments for ``func``. Default: :py:obj:`None`. + hostnames: Nodes on which to launch the function. + Default: ``"auto"`` (infer from localhost or SLURM). + workers_per_host: Number of processes to run (e.g. # of GPUs) per node. + Default: ``"auto"`` (number of GPUs per host). + ssh_config_file: Path to an SSH configuration file for connecting to nodes. + Default: ``"~/.ssh/config"`` or ``"/etc/ssh/ssh_config"``. + backend: `Backend `_ + for worker process group. Set `None` to disable. + Default: ``"auto"`` (NCCL if GPU or GLOO if CPU). + timeout: Worker process group timeout (seconds). + Default: ``600``. + default_env_vars: Environment variables to copy from the launcher process to workers. + Supports bash pattern matching syntax. + Default: ``("PATH", "LD_LIBRARY", "LIBRARY_PATH", "PYTHON*", "CUDA*", "TORCH*", + "PYTORCH*", "NCCL*")``. + extra_env_vars: Additional user-specified environment variables to copy. + Default: ``()``. + env_file: Path to a file (e.g., ``.env``) with additional environment variables to copy. + Default: :py:obj:`None`. + propagate_exceptions: Raise exceptions from worker processes in the launcher. 
+ If false, raises :exc:`WorkerFailedError` instead. + Default: :py:obj:`True`. + handler_factory: Function to customize processing of agent and worker logs with handlers. + Default: ``"auto"`` (see `custom logging `_). + +Raises: + RuntimeError: If there are configuration issues. + Exception: Any exception raised in a worker process is propagated. + WorkerFailedError: If a worker fails (e.g. from a segmentation fault) + or raises an exception and ``propagate_exceptions=False``. + AgentFailedError: If an agent fails, e.g. from an OS signal. +""" + + +FunctionP = ParamSpec("FunctionP") +FunctionR = TypeVar("FunctionR") @dataclass @@ -172,17 +128,17 @@ def set_handler_factory( def run( # noqa: C901, PLR0912 self, - func: Callable, - args: tuple | None = None, - kwargs: dict[str, Any] | None = None, - ) -> LaunchResult: + func: Callable[FunctionP, FunctionR], + *args: FunctionP.args, + **kwargs: FunctionP.kwargs, + ) -> LaunchResult[FunctionR | WorkerFailedError | ExceptionFromWorker]: """Launch a function using class configuration.""" if not dist.is_available(): msg = "The torch.distributed package is not available." 
raise RuntimeError(msg) - hostnames: list[str] = _resolve_hostnames(self.hostnames) - workers_per_host: list[int] = _resolve_workers_per_host(hostnames, self.workers_per_host) + hostnames: list[str] = resolve_hostnames(self.hostnames) + workers_per_host: list[int] = resolve_workers_per_host(hostnames, self.workers_per_host) launcher_hostname = socket.getfqdn() launcher_port = get_open_port() @@ -203,8 +159,6 @@ def run( # noqa: C901, PLR0912 logging_port=logging_port, hostnames=hostnames, workers_per_host=workers_per_host, - log_dir=Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")), - log_level=logging._nameToLevel[os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO")], # noqa: SLF001 ) stop_logging_event = Event() @@ -220,8 +174,8 @@ def run( # noqa: C901, PLR0912 # Start agents on each node for i, hostname in enumerate(hostnames): - _execute_command( - command=_build_launch_command( + execute_command( + command=build_launch_command( launcher_hostname=launcher_hostname, launcher_port=launcher_port, logger_port=logging_port, @@ -237,7 +191,7 @@ def run( # noqa: C901, PLR0912 # Initialize launcher-agent process group # ranks = (launcher, agent_{hostnames[0]}, ..., agent[-1]) - launcher_agent_group = LauncherAgentGroup( + launcher_agent_group = LauncherAgentGroup[FunctionR]( launcher_hostname=launcher_hostname, launcher_port=launcher_port, world_size=world_size, @@ -294,7 +248,7 @@ def run( # noqa: C901, PLR0912 # cleanup: SIGTERM all agents if agent_payloads is not None: for agent_payload, agent_hostname in zip(agent_payloads, hostnames): - _execute_command( + execute_command( command=f"kill {agent_payload.process_id}", hostname=agent_hostname, ssh_config_file=self.ssh_config_file, @@ -305,141 +259,23 @@ def run( # noqa: C901, PLR0912 return LaunchResult(hostnames=hostnames, return_values=return_values) -@dataclass -class LaunchResult: +class LaunchResult(Generic[FunctionR]): """Container for objects returned from workers after successful launches.""" - def 
__init__(self, hostnames: list[str], return_values: list[list[Any]]) -> None: + results: dict[str, list[FunctionR]] + + def __init__(self, hostnames: list[str], return_values: list[list[FunctionR]]) -> None: """Initialize from corresponding lists of hostnames and worker return values.""" - self.results: dict[str, list[Any]] = dict(zip(hostnames, return_values)) + self.results: dict[str, list[FunctionR]] = dict(zip(hostnames, return_values)) - def index(self, hostname: str, locak_rank: int) -> Any: + def index(self, hostname: str, locak_rank: int) -> FunctionR: """Get return value from worker by host and local rank.""" return self.results[hostname][locak_rank] - def rank(self, i: int) -> Any: + def rank(self, i: int) -> FunctionR: """Get return value from worker by global rank.""" for results_per_host in self.results.values(): if i < len(results_per_host): return results_per_host[i] i -= len(results_per_host) raise IndexError - - -def _resolve_hostnames(hostnames: list[str] | Literal["auto", "slurm"]) -> list[str]: - if hostnames == "auto": - return auto_hosts() - if hostnames == "slurm": - return slurm_hosts() - return hostnames - - -def _resolve_workers_per_host( - hostnames: list[str], - workers_per_host: int | list[int] | Literal["auto"], -) -> list[int]: - if isinstance(workers_per_host, int): - return [workers_per_host] * len(hostnames) - - if workers_per_host == "auto": - python = shlex.quote(sys.executable) - command = f"{python} -c \"import torch; print(torch.cuda.device_count(), end='')\"" - gpus_per_host = [ - int(_execute_command(command, hostname, return_stdout_stderr=True)[0]) - for hostname in hostnames - ] - if any(g == 0 for g in gpus_per_host): - msg = 'workers_per_host="auto", but no GPUs detected on at least one host.' 
- raise RuntimeError(msg) - return gpus_per_host - - return workers_per_host - - -def _build_launch_command( - launcher_hostname: str, - launcher_port: int, - logger_port: int, - world_size: int, - rank: int, - env_vars: tuple[str, ...], - env_file: str | os.PathLike | None, -) -> str: - # shlex.quote prevents shell injection here (resolves S602 in execute_command) - - commands = [] - - current_dir = shlex.quote(str(Path.cwd())) - commands.append("cd " + current_dir) - - env_exports = [] - for k, v in os.environ.items(): - if any(fnmatch.fnmatch(k, e) for e in env_vars): - env_exports.append(shlex.quote(f"{k}={v}")) - - if len(env_exports) > 0: - commands.append("export " + " ".join(env_exports)) - - if env_file is not None: - commands.append("source " + shlex.quote(str(env_file))) - - python = shlex.quote(sys.executable) - launcher_hostname = shlex.quote(launcher_hostname) - - commands.append( - f"{python} -u -m torchrunx " - f"--launcher-hostname {launcher_hostname} " - f"--launcher-port {launcher_port} " - f"--logger-port {logger_port} " - f"--world-size {world_size} " - f"--rank {rank}", - ) - - return " && ".join(commands) - - -def _execute_command( - command: str, - hostname: str, - *, - ssh_config_file: str | os.PathLike | None = None, - return_stdout_stderr: bool = False, -) -> tuple[str, str]: - is_localhost = True - _hostname_or_ip = hostname - try: - _ip = ipaddress.ip_address(_hostname_or_ip) - except ValueError: - _ip = ipaddress.ip_address(socket.gethostbyname(_hostname_or_ip)) - if not _ip.is_loopback: - # compare local interface addresses between host and localhost - _host_addrs = [addr[4][0] for addr in socket.getaddrinfo(str(_ip), None)] - _localhost_addrs = [addr[4][0] for addr in socket.getaddrinfo(socket.gethostname(), None)] - is_localhost = len(set(_host_addrs) & set(_localhost_addrs)) > 0 - - if is_localhost: - # S602: subprocess.Popen is called with shell=True (https://docs.python.org/3.9/library/subprocess.html#security-considerations) - # 
Made sure to shlex.quote arguments in build_command to prevent shell injection - process = subprocess.Popen( # noqa: S602 - command, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) - - if return_stdout_stderr: - stdout, stderr = process.communicate() - return stdout, stderr - else: - runtime_ssh_path = ssh_config_file - if isinstance(ssh_config_file, os.PathLike): - runtime_ssh_path = str(ssh_config_file) - - with fabric.Connection( - host=hostname, - config=fabric.Config(runtime_ssh_path=runtime_ssh_path), - ) as conn: - promise = conn.run(command, asynchronous=True, hide=True) - - if return_stdout_stderr: - results = promise.join() - return results.stdout, results.stderr - - return ("", "") diff --git a/src/torchrunx/utils/comm.py b/src/torchrunx/utils/comm.py index 7634c9c1..da68563f 100644 --- a/src/torchrunx/utils/comm.py +++ b/src/torchrunx/utils/comm.py @@ -15,7 +15,7 @@ import socket from contextlib import closing from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Callable, Literal +from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, TypeVar import cloudpickle import torch.distributed as dist @@ -34,8 +34,12 @@ def get_open_port() -> int: return s.getsockname()[1] +ObjectT = TypeVar("ObjectT", bound=Any) +FunctionR = TypeVar("FunctionR") + + @dataclass -class LauncherAgentGroup: +class LauncherAgentGroup(Generic[FunctionR]): """Initializes a GLOO distributed process group between launcher and all agents.""" launcher_hostname: str @@ -62,25 +66,24 @@ def __post_init__(self) -> None: timeout=datetime.timedelta(seconds=30), ) - def _serialize(self, obj: Any) -> bytes: - return cloudpickle.dumps(obj) - - def _deserialize(self, serialized: bytes) -> Any: - return cloudpickle.loads(serialized) - - def _all_gather(self, obj: Any) -> list: + def _all_gather(self, obj: ObjectT) -> list[ObjectT]: """Gather object from each rank to list (in rank-order). 
Raises: AgentFailedError: if any agent fails (observed by this communication). """ try: - rank_obj = self._serialize((self.rank, obj)) - rank_obj_list = [b""] * self.world_size - # raises RuntimeError if timeout - dist.all_gather_object(object_list=rank_obj_list, obj=rank_obj, group=self.group) - rank_obj_list = sorted([self._deserialize(o) for o in rank_obj_list]) - return [obj for _, obj in sorted(rank_obj_list)] + rank_obj = cloudpickle.dumps((self.rank, obj)) + all_gather_list = [b""] * self.world_size + + dist.all_gather_object( + object_list=all_gather_list, obj=rank_obj, group=self.group + ) # raises RuntimeError if timeout + + rank_obj_list: list[tuple[int, ObjectT]] = sorted( + [cloudpickle.loads(o) for o in all_gather_list] + ) + return [obj for _, obj in rank_obj_list] except RuntimeError as e: # occurs if launcher or any agent dies and communication times out raise AgentFailedError from e @@ -91,13 +94,17 @@ def sync_payloads( ) -> tuple[LauncherPayload, list[AgentPayload]]: """All-gather payloads across launcher and all agents.""" payloads = self._all_gather(payload) - launcher_payload = payloads[0] - agent_payloads = payloads[1:] + launcher_payload: LauncherPayload = payloads[0] # pyright: ignore [reportAssignmentType] + agent_payloads: list[AgentPayload] = payloads[1:] # pyright: ignore [reportAssignmentType] return launcher_payload, agent_payloads - def sync_agent_statuses(self, status: AgentStatus | None) -> list[AgentStatus]: + def sync_agent_statuses( + self, status: AgentStatus[FunctionR] | None + ) -> list[AgentStatus[FunctionR]]: """All-gather agent statuses across launcher and all agents.""" - return self._all_gather(status)[1:] # [0] is launcher (status=None) + # only launcher has status = None + agent_statuses: list[AgentStatus[FunctionR]] = self._all_gather(status)[1:] # pyright: ignore [reportAssignmentType] + return agent_statuses def shutdown(self) -> None: """Terminate process group.""" @@ -126,7 +133,7 @@ class AgentPayload: 
@dataclass -class AgentStatus: +class AgentStatus(Generic[FunctionR]): """Status of each agent (to be synchronized in LauncherAgentGroup). Attributes: @@ -135,7 +142,7 @@ class AgentStatus: """ state: Literal["running", "failed", "done"] - return_values: list[Any | WorkerFailedError | ExceptionFromWorker] = field( + return_values: list[FunctionR | WorkerFailedError | ExceptionFromWorker] = field( default_factory=list ) # indexed by local rank diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index ca5f0e7c..e070d0a4 100644 --- a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -2,10 +2,36 @@ from __future__ import annotations -__all__ = ["auto_hosts", "in_slurm_job", "slurm_hosts"] +__all__ = [ + "auto_hosts", + "build_launch_command", + "execute_command", + "in_slurm_job", + "resolve_hostnames", + "resolve_workers_per_host", + "slurm_hosts", +] +import fnmatch +import ipaddress import os +import shlex +import socket import subprocess +import sys +from pathlib import Path +from typing import Literal + +import fabric + + +def resolve_hostnames(hostnames: list[str] | Literal["auto", "slurm"]) -> list[str]: + """Resolve hosts from environment.""" + if hostnames == "auto": + return auto_hosts() + if hostnames == "slurm": + return slurm_hosts() + return hostnames def auto_hosts() -> list[str]: @@ -27,3 +53,118 @@ def slurm_hosts() -> list[str]: raise RuntimeError(msg) return subprocess.check_output(["scontrol", "show", "hostnames"]).decode().strip().split("\n") + + +def resolve_workers_per_host( + hostnames: list[str], + workers_per_host: int | list[int] | Literal["auto"], +) -> list[int]: + """Resolve number of workers per host. 
If "auto", set to number of GPUs on each host.""" + if isinstance(workers_per_host, int): + return [workers_per_host] * len(hostnames) + + if workers_per_host == "auto": + # Execute command to count GPUs on each host + python = shlex.quote(sys.executable) + command = f"{python} -c \"import torch; print(torch.cuda.device_count(), end='')\"" + gpus_per_host = [ + int(execute_command(command, hostname, return_stdout_stderr=True)[0]) + for hostname in hostnames + ] + if any(g == 0 for g in gpus_per_host): + msg = 'workers_per_host="auto", but no GPUs detected on at least one host.' + raise RuntimeError(msg) + return gpus_per_host + + return workers_per_host + + +def build_launch_command( + launcher_hostname: str, + launcher_port: int, + logger_port: int, + world_size: int, + rank: int, + env_vars: tuple[str, ...], + env_file: str | os.PathLike | None, +) -> str: + """Generator for command to launch torchrunx on an agent.""" + # shlex.quote prevents shell injection here (resolves S602 in execute_command) + + commands = [] + + current_dir = shlex.quote(str(Path.cwd())) + commands.append("cd " + current_dir) + + env_exports = [] + for k, v in os.environ.items(): + if any(fnmatch.fnmatch(k, e) for e in env_vars): + env_exports.append(shlex.quote(f"{k}={v}")) + + if len(env_exports) > 0: + commands.append("export " + " ".join(env_exports)) + + if env_file is not None: + commands.append("source " + shlex.quote(str(env_file))) + + python = shlex.quote(sys.executable) + launcher_hostname = shlex.quote(launcher_hostname) + + commands.append( + f"{python} -u -m torchrunx " + f"--launcher-hostname {launcher_hostname} " + f"--launcher-port {launcher_port} " + f"--logger-port {logger_port} " + f"--world-size {world_size} " + f"--rank {rank}", + ) + + return " && ".join(commands) + + +def execute_command( + command: str, + hostname: str, + *, + ssh_config_file: str | os.PathLike | None = None, + return_stdout_stderr: bool = False, +) -> tuple[str, str]: + """Run a command on local 
or remote host (using SSH).""" + is_localhost = True + _hostname_or_ip = hostname + try: + _ip = ipaddress.ip_address(_hostname_or_ip) + except ValueError: + _ip = ipaddress.ip_address(socket.gethostbyname(_hostname_or_ip)) + if not _ip.is_loopback: + # compare local interface addresses between host and localhost + _host_addrs = [addr[4][0] for addr in socket.getaddrinfo(str(_ip), None)] + _localhost_addrs = [addr[4][0] for addr in socket.getaddrinfo(socket.gethostname(), None)] + is_localhost = len(set(_host_addrs) & set(_localhost_addrs)) > 0 + + if is_localhost: + # S602: subprocess.Popen is called with shell=True (https://docs.python.org/3.9/library/subprocess.html#security-considerations) + # Made sure to shlex.quote arguments in build_command to prevent shell injection + process = subprocess.Popen( # noqa: S602 + command, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + + if return_stdout_stderr: + stdout, stderr = process.communicate() + return stdout, stderr + else: + runtime_ssh_path = ssh_config_file + if isinstance(ssh_config_file, os.PathLike): + runtime_ssh_path = str(ssh_config_file) + + with fabric.Connection( + host=hostname, + config=fabric.Config(runtime_ssh_path=runtime_ssh_path), + ) as conn: + promise = conn.run(command, asynchronous=True, hide=True) + + if return_stdout_stderr: + results = promise.join() + return results.stdout, results.stderr + + return ("", "") diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index 643c9681..d8669ff7 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -16,6 +16,7 @@ import datetime import logging +import os import pickle import signal import struct @@ -33,7 +34,6 @@ from typing_extensions import Self if TYPE_CHECKING: - import os from multiprocessing.synchronize import Event as EventClass ## Handler utilities @@ -64,6 +64,26 @@ def _filter(record: WorkerLogRecord) -> bool: handler.addFilter(_filter) # pyright: ignore 
[reportArgumentType] +def default_handlers( + hostnames: list[str], + workers_per_host: list[int], + log_level: int = logging.INFO, +) -> list[Handler]: + """Default :mod:`logging.Handler`s for ``log_handlers="auto"`` in :mod:`torchrunx.launch`. + + Logs for ``host[0]`` and its ``local_rank[0]`` worker are written to launcher process stdout. + Logs for all agents/workers are written to files in ``log_dir`` (named by timestamp, hostname, + local_rank). + """ + log_dir = Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")) + log_level = logging._nameToLevel[os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO")] # noqa: SLF001 + return [ + stream_handler(hostname=hostnames[0], local_rank=None, log_level=log_level), + stream_handler(hostname=hostnames[0], local_rank=0, log_level=log_level), + *file_handlers(hostnames, workers_per_host, log_dir=log_dir, log_level=log_level), + ] + + def stream_handler( hostname: str, local_rank: int | None, log_level: int = logging.NOTSET ) -> Handler: @@ -121,25 +141,6 @@ def file_handlers( return handlers -def default_handlers( - hostnames: list[str], - workers_per_host: list[int], - log_dir: str | os.PathLike = Path("torchrunx_logs"), - log_level: int = logging.INFO, -) -> list[Handler]: - """Default :mod:`logging.Handler`s for ``log_handlers="auto"`` in :mod:`torchrunx.launch`. - - Logs for ``host[0]`` and its ``local_rank[0]`` worker are written to launcher process stdout. - Logs for all agents/workers are written to files in ``log_dir`` (named by timestamp, hostname, - local_rank). 
- """ - return [ - stream_handler(hostname=hostnames[0], local_rank=None, log_level=log_level), - stream_handler(hostname=hostnames[0], local_rank=0, log_level=log_level), - *file_handlers(hostnames, workers_per_host, log_dir=log_dir, log_level=log_level), - ] - - ## Launcher utilities @@ -193,8 +194,6 @@ class LoggingServerArgs: logging_port: int hostnames: list[str] workers_per_host: list[int] - log_dir: str | os.PathLike - log_level: int def serialize(self) -> bytes: """Serialize :class:`LoggingServerArgs` for passing to a new process.""" @@ -220,8 +219,6 @@ def start_logging_server( log_handlers = default_handlers( hostnames=args.hostnames, workers_per_host=args.workers_per_host, - log_dir=args.log_dir, - log_level=args.log_level, ) elif isinstance(args.handler_factory, Callable): log_handlers = args.handler_factory() From 4e45f38e5f517d57ab6c00577989857f9690e839 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 14:41:19 -0500 Subject: [PATCH 115/141] fixed launcher.run return type --- src/torchrunx/__init__.py | 3 +-- src/torchrunx/launcher.py | 18 ++++++++++-------- tests/test_ci.py | 17 +++++++++-------- tests/test_func.py | 5 ++--- tests/test_submitit.py | 2 +- tests/test_train_gpu.py | 5 ++--- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index 3856f589..31ca0cda 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -1,6 +1,6 @@ """API for our torchrunx library.""" -from .launcher import Launcher, LaunchResult, launch +from .launcher import Launcher, LaunchResult from .utils.errors import AgentFailedError, WorkerFailedError __all__ = [ @@ -8,5 +8,4 @@ "LaunchResult", "Launcher", "WorkerFailedError", - "launch", ] diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 930b5922..3d7a8fda 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -131,7 +131,7 @@ def run( # noqa: C901, PLR0912 func: Callable[FunctionP, 
FunctionR], *args: FunctionP.args, **kwargs: FunctionP.kwargs, - ) -> LaunchResult[FunctionR | WorkerFailedError | ExceptionFromWorker]: + ) -> LaunchResult[FunctionR]: """Launch a function using class configuration.""" if not dist.is_available(): msg = "The torch.distributed package is not available." @@ -226,13 +226,16 @@ def run( # noqa: C901, PLR0912 # raises specific exception if any agent fails for s in agent_statuses: - for value in s.return_values: - if isinstance(value, ExceptionFromWorker): + for v in s.return_values: + if isinstance(v, ExceptionFromWorker): if self.propagate_exceptions: - raise value.exception - raise WorkerFailedError from value.exception - if isinstance(value, WorkerFailedError): - raise value + raise v.exception + raise WorkerFailedError from v.exception + if isinstance(v, WorkerFailedError): + raise v + + # confirmed that no return values are exceptions + return_values: list[list[FunctionR]] = [s.return_values for s in agent_statuses] # pyright: ignore [reportAssignmentType] if all(s.state == "done" for s in agent_statuses): break @@ -255,7 +258,6 @@ def run( # noqa: C901, PLR0912 ) # if launch is successful: return objects from workers - return_values = [s.return_values for s in agent_statuses] return LaunchResult(hostnames=hostnames, return_values=return_values) diff --git a/tests/test_ci.py b/tests/test_ci.py index 98cce6ff..27e4223a 100644 --- a/tests/test_ci.py +++ b/tests/test_ci.py @@ -32,11 +32,10 @@ def dist_func() -> torch.Tensor: tmp = tempfile.mkdtemp() os.environ["TORCHRUNX_DIR"] = tmp - r = trx.launch( - dist_func, + r = trx.Launcher( workers_per_host=2, - backend="gloo", # log_dir="./test_logs" - ) + backend="gloo", + ).run(dist_func) assert torch.all(r.rank(0) == r.rank(1)) @@ -55,10 +54,11 @@ def dist_func() -> None: time.sleep(1) - trx.launch( - dist_func, + trx.Launcher( workers_per_host=num_workers, backend="gloo", + ).run( + dist_func, ) after_timestamp = datetime.datetime.now() @@ -95,10 +95,11 @@ def 
error_func() -> NoReturn: os.environ["TORCHRUNX_DIR"] = tmp with pytest.raises(ValueError) as excinfo: # noqa: PT011 - trx.launch( - error_func, + trx.Launcher( workers_per_host=1, backend="gloo", + ).run( + error_func, ) assert "abcdefg" in str(excinfo.value) diff --git a/tests/test_func.py b/tests/test_func.py index f0474b98..1a0fc5cb 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -9,10 +9,9 @@ def test_launch() -> None: - result = trx.launch( - func=simple_matmul, + result = trx.Launcher( hostnames="slurm", - ) + ).run(simple_matmul) result_values = reduce(add, result.results.values()) diff --git a/tests/test_submitit.py b/tests/test_submitit.py index 1f639df3..75a91f19 100644 --- a/tests/test_submitit.py +++ b/tests/test_submitit.py @@ -53,7 +53,7 @@ def main() -> None: def launch() -> None: - trx.launch(main, hostnames="slurm") + trx.Launcher(hostnames="slurm").run(main) def test_submitit() -> None: diff --git a/tests/test_train_gpu.py b/tests/test_train_gpu.py index e1cbad3b..6be44c04 100644 --- a/tests/test_train_gpu.py +++ b/tests/test_train_gpu.py @@ -32,10 +32,9 @@ def worker() -> None: def test_distributed_train() -> None: - trx.launch( - worker, + trx.Launcher( backend="nccl", - ) + ).run(worker) if __name__ == "__main__": From 2387124ec960bce0b402d513a924361806bf0e28 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 14:57:40 -0500 Subject: [PATCH 116/141] adjust workerargs serialization --- src/torchrunx/worker.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index e7307520..422f9cc4 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -7,12 +7,13 @@ import os import sys import traceback -from dataclasses import dataclass +from dataclasses import asdict, dataclass from typing import Any, Callable, Literal import cloudpickle import torch import torch.distributed as dist +from typing_extensions import Self from 
.utils.errors import ExceptionFromWorker from .utils.logging import log_records_to_socket, redirect_stdio_to_logger @@ -38,29 +39,24 @@ class WorkerArgs: hostname: str timeout: int - def serialize(self) -> SerializedWorkerArgs: + def serialize(self) -> bytes: """Arguments must be serialized (to bytes) before passed to spawned workers.""" - return SerializedWorkerArgs(worker_args=self) + return cloudpickle.dumps(asdict(self)) + @classmethod + def from_bytes(cls, b: bytes) -> Self: + """Deserialize the bytes back into a WorkerArgs object.""" + return cls(**cloudpickle.loads(b)) -class SerializedWorkerArgs: - """We use cloudpickle as a serialization backend (as it supports nearly all Python types).""" - def __init__(self, worker_args: WorkerArgs) -> None: - self.bytes = cloudpickle.dumps(worker_args) - - def deserialize(self) -> WorkerArgs: - return cloudpickle.loads(self.bytes) - - -def worker_entrypoint(serialized_worker_args: SerializedWorkerArgs) -> Any | ExceptionFromWorker: +def worker_entrypoint(serialized_worker_args: bytes) -> Any | ExceptionFromWorker: """Function called by spawned worker processes. Workers first prepare a process group (for communicating with all other workers). They then invoke the user-provided function. Logs are transmitted to the launcher process. """ - worker_args: WorkerArgs = serialized_worker_args.deserialize() + worker_args = WorkerArgs.from_bytes(serialized_worker_args) # Start logging to the logging server (i.e. 
the launcher) From b906bac13fd4c88ca0a1212a42f2fe2ab830de6e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 22:03:09 -0500 Subject: [PATCH 117/141] update docs for typed Launcher --- docs/conf.py | 7 +- docs/source/api.md | 25 +--- docs/source/features/cli.md | 8 +- pyproject.toml | 2 + src/torchrunx/__init__.py | 8 +- src/torchrunx/integrations/__init__.py | 1 - src/torchrunx/launcher.py | 168 ++++++++++--------------- src/torchrunx/utils/__init__.py | 2 - src/torchrunx/utils/environment.py | 79 +++++++----- src/torchrunx/utils/logging.py | 16 +-- uv.lock | 2 + 11 files changed, 139 insertions(+), 179 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 8de5c006..242298ae 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -24,11 +24,12 @@ "sphinx_toolbox.github", ] +maximum_signature_line_length = 90 autodoc_member_order = "bysource" -autodoc_typehints = "description" -autodoc_typehints_description_target = "documented" -intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} +intersphinx_mapping = { + 'python': ('https://docs.python.org/3.9', None), +} from docs.linkcode_github import generate_linkcode_resolve_fn linkcode_resolve = generate_linkcode_resolve_fn(project, github_username, github_repository) diff --git a/docs/source/api.md b/docs/source/api.md index 95be4c9a..6ee6d489 100644 --- a/docs/source/api.md +++ b/docs/source/api.md @@ -1,29 +1,6 @@ # API ```{eval-rst} -.. autofunction:: torchrunx.launch(func, args, kwargs, ...) -``` - -We provide the {obj}`torchrunx.Launcher` class as an alias to {obj}`torchrunx.launch`. - -```{eval-rst} -.. autoclass:: torchrunx.Launcher - :members: -``` - -## Results - -```{eval-rst} -.. autoclass:: torchrunx.LaunchResult +.. automodule:: torchrunx :members: ``` - -## Exceptions - -```{eval-rst} -.. autoexception:: torchrunx.AgentFailedError -``` - -```{eval-rst} -.. 
autoexception:: torchrunx.WorkerFailedError -``` diff --git a/docs/source/features/cli.md b/docs/source/features/cli.md index bce898f9..d8e33e73 100644 --- a/docs/source/features/cli.md +++ b/docs/source/features/cli.md @@ -1,16 +1,16 @@ # CLI Integration -We can use {mod}`torchrunx.Launcher` to populate arguments from the CLI (e.g. with [tyro](https://brentyi.github.io/tyro/)): +We can automatically populate {mod}`torchrunx.Launcher` arguments using most CLI tools (those that generate interfaces from Data Classes, e.g. [tyro](https://brentyi.github.io/tyro/)): ```python -import torchrunx as trx +import torchrunx import tyro def distributed_function(): - pass + ... if __name__ == "__main__": - launcher = tyro.cli(trx.Launcher) + launcher = tyro.cli(torchrunx.Launcher) launcher.run(distributed_function) ``` diff --git a/pyproject.toml b/pyproject.toml index 8c781a71..1f9aa7be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,7 @@ dependencies = [ # torch.distributed depends on numpy # torch<=2.2 needs numpy<2 "numpy>=1.20", + "typing-extensions>=4.9.0", ] [dependency-groups] dev = ["ruff==0.9.5", "pyright[nodejs]==1.1.393", "pytest==8.3.4"] @@ -36,6 +37,7 @@ src = ["src", "tests"] [tool.ruff.lint] select = ["ALL"] ignore = [ + "D104", # package docstrings "ANN401", # self / cls / Any annotations "BLE001", # blind exceptions "TD", # todo syntax diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index 31ca0cda..e22a0126 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -1,11 +1,9 @@ -"""API for our torchrunx library.""" - from .launcher import Launcher, LaunchResult from .utils.errors import AgentFailedError, WorkerFailedError -__all__ = [ - "AgentFailedError", - "LaunchResult", +__all__ = [ # noqa: RUF022 "Launcher", + "LaunchResult", + "AgentFailedError", "WorkerFailedError", ] diff --git a/src/torchrunx/integrations/__init__.py b/src/torchrunx/integrations/__init__.py index 58cebc98..e69de29b 100644 --- 
a/src/torchrunx/integrations/__init__.py +++ b/src/torchrunx/integrations/__init__.py @@ -1 +0,0 @@ -"""Utilities for integrations with other libraries.""" diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 3d7a8fda..ca6d95a0 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -6,10 +6,10 @@ import itertools import socket -from dataclasses import dataclass +from dataclasses import dataclass, field from functools import partial from multiprocessing import Event, Process -from typing import TYPE_CHECKING, Callable, Generic, Literal, TypeVar +from typing import TYPE_CHECKING, Generic, TypeVar import torch.distributed as dist from typing_extensions import ParamSpec, Self @@ -22,55 +22,15 @@ from .utils.environment import ( build_launch_command, execute_command, - resolve_hostnames, - resolve_workers_per_host, + resolve_environment, ) from .utils.errors import ExceptionFromWorker, WorkerFailedError from .utils.logging import LoggingServerArgs, start_logging_server if TYPE_CHECKING: + import logging import os - from logging import Handler - - -"""Distribute and parallelize a function onto specified nodes and workers. - -Arguments: - func: Function to replicate on each node/worker. - args: Positional arguments for ``func``. Default: :py:obj:`None`. - kwargs: Keyword arguments for ``func``. Default: :py:obj:`None`. - hostnames: Nodes on which to launch the function. - Default: ``"auto"`` (infer from localhost or SLURM). - workers_per_host: Number of processes to run (e.g. # of GPUs) per node. - Default: ``"auto"`` (number of GPUs per host). - ssh_config_file: Path to an SSH configuration file for connecting to nodes. - Default: ``"~/.ssh/config"`` or ``"/etc/ssh/ssh_config"``. - backend: `Backend `_ - for worker process group. Set `None` to disable. - Default: ``"auto"`` (NCCL if GPU or GLOO if CPU). - timeout: Worker process group timeout (seconds). - Default: ``600``. 
- default_env_vars: Environment variables to copy from the launcher process to workers. - Supports bash pattern matching syntax. - Default: ``("PATH", "LD_LIBRARY", "LIBRARY_PATH", "PYTHON*", "CUDA*", "TORCH*", - "PYTORCH*", "NCCL*")``. - extra_env_vars: Additional user-specified environment variables to copy. - Default: ``()``. - env_file: Path to a file (e.g., ``.env``) with additional environment variables to copy. - Default: :py:obj:`None`. - propagate_exceptions: Raise exceptions from worker processes in the launcher. - If false, raises :exc:`WorkerFailedError` instead. - Default: :py:obj:`True`. - handler_factory: Function to customize processing of agent and worker logs with handlers. - Default: ``"auto"`` (see `custom logging `_). - -Raises: - RuntimeError: If there are configuration issues. - Exception: Any exception raised in a worker process is propagated. - WorkerFailedError: If a worker fails (e.g. from a segmentation fault) - or raises an exception and ``propagate_exceptions=False``. - AgentFailedError: If an agent fails, e.g. from an OS signal. -""" + import typing FunctionP = ParamSpec("FunctionP") @@ -79,22 +39,19 @@ @dataclass class Launcher: - """Alias class for :func:`launch`. Refer to that function for documentation.""" - - hostnames: list[str] | Literal["auto", "slurm"] = "auto" - """Node hostnames to use in distributed execution. "auto" and "slurm" attempt to detect this - for you based on your environmental variables.""" - workers_per_host: int | list[int] | Literal["auto"] = "auto" - """Number of worker processes per node. You can specify a constant number of workers for all - nodes (int), a different number of workers for each node (list[int]), or automatically determine - it per-node ("auto").""" + """For configuring the function launch environment.""" + + hostnames: list[str] | typing.Literal["auto", "slurm"] = "auto" + """Nodes on which to launch the function. 
By default, infer from localhost or SLURM.""" + workers_per_host: int | list[int] | typing.Literal["auto"] = "auto" + """Number of processes to run per node. By default, number of GPUs per host.""" ssh_config_file: str | os.PathLike | None = None - """Path to custom SSH Config for passwordless SSH into each node.""" - backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto" - """A torch.distributed backend to use for inter-process communication. "auto" will use NCCL if - GPUs are detected, otherwise GLOO.""" + """For connecting to nodes. By default, ``"~/.ssh/config"`` or ``"/etc/ssh/ssh_config"``.""" + backend: typing.Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto" + """`Backend `_ + for worker process group or ``None``. By default, NCCL if GPUs detected, else GLOO.""" timeout: int = 600 - """The torch.distributed communication timeout of the worker process group, in seconds.""" + """Worker process group timeout (seconds).""" default_env_vars: tuple[str, ...] = ( "PATH", "LD_LIBRARY", @@ -105,40 +62,55 @@ class Launcher: "PYTORCH*", "NCCL*", ) - """Environmental variables to clone from the launcher process to worker processes, - supporting unix pattern matching.""" + """Environment variables to copy from the launcher process to workers. + Supports bash pattern matching syntax.""" extra_env_vars: tuple[str, ...] = () - """Additional environmental variables to set in the worker process environments, - formatted identically to the defaul_env_vars field.""" + """Additional user-specified environment variables to copy.""" env_file: str | os.PathLike | None = None - """A bash style .env file that will be sourced by worker processes.""" + """Path to (e.g. 
``.env``) with additional environment variables to load onto workers.""" propagate_exceptions: bool = True - """Whether worker exceptions should be raised by the launcher.""" + """Whether to raise specific worker exceptions or :exc:`torchrunx.WorkerFailedError`.""" - def __post_init__(self) -> None: - """Initializing ``handler_factory``. Inclusion in ``__init__`` inhibits CLI generation.""" - self.handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None = "auto" + handler_factory: typing.Callable[[], list[logging.Handler]] | typing.Literal["auto"] | None = ( + field(default="auto", init=False) + ) def set_handler_factory( - self, factory: Callable[[], list[Handler]] | Literal["auto"] | None + self, factory: typing.Callable[[], list[logging.Handler]] | typing.Literal["auto"] | None ) -> Self: - """Setter for log handler factory.""" + """Provide a ``factory`` to set custom handling of agent and worker logs. + + Parameters: + factory: Factory function to generate :obj:`logging.Handler` objects. + + See `custom logging `_. + """ self.handler_factory = factory return self def run( # noqa: C901, PLR0912 self, - func: Callable[FunctionP, FunctionR], + func: typing.Callable[FunctionP, FunctionR], *args: FunctionP.args, **kwargs: FunctionP.kwargs, ) -> LaunchResult[FunctionR]: - """Launch a function using class configuration.""" + """Distribute a function onto specified nodes and parallelize across workers. + + Raises: + RuntimeError: Configuration issues. + Exception: Exceptions raised in worker processes are propagated + (if ``propagate_exceptions=True``). + WorkerFailedError: If a worker fails (e.g. from a segmentation fault) + or raises an exception with ``propagate_exceptions=False``. + AgentFailedError: If an agent fails, e.g. from an OS signal. + """ if not dist.is_available(): msg = "The torch.distributed package is not available." 
raise RuntimeError(msg) - hostnames: list[str] = resolve_hostnames(self.hostnames) - workers_per_host: list[int] = resolve_workers_per_host(hostnames, self.workers_per_host) + hostnames, workers_per_host, backend = resolve_environment( + self.hostnames, self.workers_per_host, self.backend, self.ssh_config_file + ) launcher_hostname = socket.getfqdn() launcher_port = get_open_port() @@ -148,6 +120,20 @@ def run( # noqa: C901, PLR0912 stop_logging_event = None log_process = None launcher_agent_group = None + + _cumulative_workers = [0, *itertools.accumulate(workers_per_host)] + worker_global_ranks = [ + list(range(_cumulative_workers[n], _cumulative_workers[n + 1])) + for n in range(len(hostnames)) + ] + payload = LauncherPayload( + fn=partial(func, *(args or ()), **(kwargs or {})), + hostnames=hostnames, + worker_global_ranks=worker_global_ranks, + worker_world_size=sum(workers_per_host), + backend=backend, + timeout=self.timeout, + ) agent_payloads = None try: @@ -200,22 +186,6 @@ def run( # noqa: C901, PLR0912 # Sync initial payloads between launcher and agents - _cumulative_workers = [0, *itertools.accumulate(workers_per_host)] - - worker_global_ranks = [ - list(range(_cumulative_workers[n], _cumulative_workers[n + 1])) - for n in range(len(hostnames)) - ] - - payload = LauncherPayload( - fn=partial(func, *(args or ()), **(kwargs or {})), - hostnames=hostnames, - worker_global_ranks=worker_global_ranks, - worker_world_size=sum(workers_per_host), - backend=self.backend, - timeout=self.timeout, - ) - launcher_payload, agent_payloads = launcher_agent_group.sync_payloads(payload=payload) # Monitor agent statuses (until failed or done) @@ -234,11 +204,9 @@ def run( # noqa: C901, PLR0912 if isinstance(v, WorkerFailedError): raise v - # confirmed that no return values are exceptions - return_values: list[list[FunctionR]] = [s.return_values for s in agent_statuses] # pyright: ignore [reportAssignmentType] - if all(s.state == "done" for s in agent_statuses): - break + 
return_values: list[list[FunctionR]] = [s.return_values for s in agent_statuses] # pyright: ignore [reportAssignmentType] + return LaunchResult.from_returns(hostnames, return_values) finally: if stop_logging_event is not None: stop_logging_event.set() @@ -257,18 +225,16 @@ def run( # noqa: C901, PLR0912 ssh_config_file=self.ssh_config_file, ) - # if launch is successful: return objects from workers - return LaunchResult(hostnames=hostnames, return_values=return_values) - +@dataclass class LaunchResult(Generic[FunctionR]): """Container for objects returned from workers after successful launches.""" - results: dict[str, list[FunctionR]] + results: dict[str, list[FunctionR]] # [hostname][local_rank] -> FunctionR - def __init__(self, hostnames: list[str], return_values: list[list[FunctionR]]) -> None: - """Initialize from corresponding lists of hostnames and worker return values.""" - self.results: dict[str, list[FunctionR]] = dict(zip(hostnames, return_values)) + @classmethod + def from_returns(cls, hostnames: list[str], return_values: list[list[FunctionR]]) -> Self: # noqa: D102 + return cls(results=dict(zip(hostnames, return_values))) def index(self, hostname: str, locak_rank: int) -> FunctionR: """Get return value from worker by host and local rank.""" diff --git a/src/torchrunx/utils/__init__.py b/src/torchrunx/utils/__init__.py index dc4af98f..d6b94d17 100644 --- a/src/torchrunx/utils/__init__.py +++ b/src/torchrunx/utils/__init__.py @@ -1,5 +1,3 @@ -"""Utility classes and functions.""" - from .logging import add_filter_to_handler, file_handler, stream_handler __all__ = ["add_filter_to_handler", "file_handler", "stream_handler"] diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index e070d0a4..dd6dd258 100644 --- a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -2,13 +2,16 @@ from __future__ import annotations +from typing import Literal, Union + +from typing_extensions import TypeAlias + __all__ 
= [ "auto_hosts", "build_launch_command", "execute_command", + "get_gpus_per_host", "in_slurm_job", - "resolve_hostnames", - "resolve_workers_per_host", "slurm_hosts", ] @@ -20,18 +23,42 @@ import subprocess import sys from pathlib import Path -from typing import Literal import fabric +Hostnames: TypeAlias = list[str] +WorkersPerHost: TypeAlias = list[int] +Backend: TypeAlias = Union[Literal["nccl", "gloo", "mpi", "ucc"], None] -def resolve_hostnames(hostnames: list[str] | Literal["auto", "slurm"]) -> list[str]: - """Resolve hosts from environment.""" + +def resolve_environment( + hostnames: list[str] | Literal["auto", "slurm"], + workers_per_host: int | list[int] | Literal["auto"], + backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None, + ssh_config_file: str | os.PathLike | None, +) -> tuple[Hostnames, WorkersPerHost, Backend]: if hostnames == "auto": - return auto_hosts() - if hostnames == "slurm": - return slurm_hosts() - return hostnames + hostnames = auto_hosts() + elif hostnames == "slurm": + hostnames = slurm_hosts() + + if isinstance(workers_per_host, int): + workers_per_host = [workers_per_host] * len(hostnames) + + if workers_per_host == "auto" or backend == "auto": + gpus_per_host: list[int] = get_gpus_per_host(hostnames, ssh_config_file) + gpus_on_every_host: bool = all(g > 0 for g in gpus_per_host) + + if workers_per_host == "auto": + if not gpus_on_every_host: + msg = 'workers_per_host="auto", but no GPUs detected on at least one host.' 
+ raise RuntimeError(msg) + workers_per_host = gpus_per_host + + if backend == "auto": + backend = "nccl" if gpus_per_host else "gloo" + + return hostnames, workers_per_host, backend def auto_hosts() -> list[str]: @@ -55,28 +82,18 @@ def slurm_hosts() -> list[str]: return subprocess.check_output(["scontrol", "show", "hostnames"]).decode().strip().split("\n") -def resolve_workers_per_host( - hostnames: list[str], - workers_per_host: int | list[int] | Literal["auto"], -) -> list[int]: - """Resolve number of workers per host. If "auto", set to number of GPUs on each host.""" - if isinstance(workers_per_host, int): - return [workers_per_host] * len(hostnames) - - if workers_per_host == "auto": - # Execute command to count GPUs on each host - python = shlex.quote(sys.executable) - command = f"{python} -c \"import torch; print(torch.cuda.device_count(), end='')\"" - gpus_per_host = [ - int(execute_command(command, hostname, return_stdout_stderr=True)[0]) - for hostname in hostnames - ] - if any(g == 0 for g in gpus_per_host): - msg = 'workers_per_host="auto", but no GPUs detected on at least one host.' 
- raise RuntimeError(msg) - return gpus_per_host - - return workers_per_host +def get_gpus_per_host(hostnames: list[str], ssh_config_file: str | os.PathLike | None) -> list[int]: + """Count the number of GPUs on each host.""" + python = shlex.quote(sys.executable) + command = f"{python} -c \"import torch; print(torch.cuda.device_count(), end='')\"" + return [ + int( + execute_command( + command, hostname, ssh_config_file=ssh_config_file, return_stdout_stderr=True + )[0] + ) + for hostname in hostnames + ] def build_launch_command( diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index d8669ff7..d94e1932 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -40,7 +40,7 @@ def add_filter_to_handler( - handler: Handler, + handler: logging.Handler, hostname: str, local_rank: int | None, # None indicates agent log_level: int = logging.NOTSET, @@ -68,7 +68,7 @@ def default_handlers( hostnames: list[str], workers_per_host: list[int], log_level: int = logging.INFO, -) -> list[Handler]: +) -> list[logging.Handler]: """Default :mod:`logging.Handler`s for ``log_handlers="auto"`` in :mod:`torchrunx.launch`. Logs for ``host[0]`` and its ``local_rank[0]`` worker are written to launcher process stdout. 
@@ -86,7 +86,7 @@ def default_handlers( def stream_handler( hostname: str, local_rank: int | None, log_level: int = logging.NOTSET -) -> Handler: +) -> logging.Handler: """Handler builder function for writing logs from specified hostname/rank to stdout.""" handler = logging.StreamHandler(stream=sys.stdout) add_filter_to_handler(handler, hostname, local_rank, log_level=log_level) @@ -106,7 +106,7 @@ def file_handler( local_rank: int | None, file_path: str | os.PathLike, log_level: int = logging.NOTSET, -) -> Handler: +) -> logging.Handler: """Handler builder function for writing logs from specified hostname/rank to a file.""" handler = logging.FileHandler(file_path) add_filter_to_handler(handler, hostname, local_rank, log_level=log_level) @@ -121,7 +121,7 @@ def file_handlers( workers_per_host: list[int], log_dir: str | os.PathLike = Path("torchrunx_logs"), log_level: int = logging.NOTSET, -) -> list[Handler]: +) -> list[logging.Handler]: """Handler builder function for writing logs for all workers/agents to a directory. Files are named with hostname and the local_rank (for workers). 
@@ -199,8 +199,8 @@ def serialize(self) -> bytes: """Serialize :class:`LoggingServerArgs` for passing to a new process.""" return cloudpickle.dumps(self) - @staticmethod - def deserialize(serialized: bytes) -> LoggingServerArgs: + @classmethod + def from_bytes(cls, serialized: bytes) -> Self: """Deserialize bytes to :class:`LoggingServerArgs`.""" return cloudpickle.loads(serialized) @@ -210,7 +210,7 @@ def start_logging_server( stop_event: EventClass, ) -> None: """Serve :class:`_LogRecordSocketReceiver` until stop event triggered.""" - args = LoggingServerArgs.deserialize(serialized_args) + args = LoggingServerArgs.from_bytes(serialized_args) log_handlers = [] if args.handler_factory is None: diff --git a/uv.lock b/uv.lock index e2442610..bf7620c5 100644 --- a/uv.lock +++ b/uv.lock @@ -1810,6 +1810,7 @@ dependencies = [ { name = "numpy", version = "2.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "numpy", version = "2.2.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "torch" }, + { name = "typing-extensions" }, ] [package.dev-dependencies] @@ -1835,6 +1836,7 @@ requires-dist = [ { name = "fabric", specifier = ">=3.2" }, { name = "numpy", specifier = ">=1.20" }, { name = "torch", specifier = ">=2.0" }, + { name = "typing-extensions", specifier = ">=4.9.0" }, ] [package.metadata.requires-dev] From 2f279a4fa28984c6cc71850ade74f6593e8de86e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 22:43:15 -0500 Subject: [PATCH 118/141] update readme --- README.md | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 722f0edb..51b7d61f 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ Requires: Linux (+ SSH & shared filesystem if using multiple machines) Dummy distributed training function: ```python +from __future__ import annotations import os import torch import torch.nn as nn @@ 
-59,15 +60,13 @@ Launching training with `torchrunx`: ```python import torchrunx -results = torchrunx.launch( - func = train, - kwargs = dict( - model = nn.Linear(10, 10), - num_steps = 10 - ), - # +results = torchrunx.Launcher( hostnames = ["localhost", "second_machine"], workers_per_host = 2 +).run( + train, + model = nn.Linear(10, 10), + num_steps = 10 ) trained_model: nn.Module = results.rank(0) @@ -118,4 +117,4 @@ torch.save(trained_model.state_dict(), "output/model.pth") > - Automatic detection of SLURM environments. > - Start multi-node training from Python notebooks! -**On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, fuller typing, and more!** +**On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, and more!** From 312be9eb3de09b8f87bae921a8144946c6592bc6 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 15 Feb 2025 23:33:48 -0500 Subject: [PATCH 119/141] update functools.partial args, kwargs in launcher --- src/torchrunx/launcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index ca6d95a0..4c1c7a9b 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -127,7 +127,7 @@ def run( # noqa: C901, PLR0912 for n in range(len(hostnames)) ] payload = LauncherPayload( - fn=partial(func, *(args or ()), **(kwargs or {})), + fn=partial(func, *args, **kwargs), hostnames=hostnames, worker_global_ranks=worker_global_ranks, worker_world_size=sum(workers_per_host), From aead8dbad7ed29b2860daf212bba10717040af5a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 16 Feb 2025 12:30:01 -0500 Subject: [PATCH 120/141] updates to env_vars arguments --- pyproject.toml | 11 +++-- src/torchrunx/__init__.py | 3 +- src/torchrunx/launcher.py | 76 ++++++++++++++++++------------ 
src/torchrunx/utils/environment.py | 12 ++--- src/torchrunx/utils/logging.py | 6 +-- uv.lock | 7 +-- 6 files changed, 66 insertions(+), 49 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1f9aa7be..7925c7b7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ authors = [ ] description = "Automatically initialize distributed PyTorch environments" readme = "README.md" -license = {file = "LICENSE"} +license = { file = "LICENSE" } urls = { Repository = "https://github.com/apoorvkh/torchrunx.git", Documentation = "https://torchrun.xyz" } requires-python = ">=3.9" dependencies = [ @@ -26,8 +26,12 @@ dependencies = [ [dependency-groups] dev = ["ruff==0.9.5", "pyright[nodejs]==1.1.393", "pytest==8.3.4"] test-extras = ["submitit", "transformers"] -docs = ["sphinx==7.4.7", "furo==2024.8.6", "myst-parser==3.0.1", "sphinx-toolbox==3.8.2"] - +docs = [ + "sphinx==7.4.7", + "furo==2024.8.6", + "myst-parser==3.0.1", + "sphinx-toolbox==3.8.2", +] [tool.ruff] include = ["pyproject.toml", "src/**/*.py", "tests/**/*.py"] @@ -37,6 +41,7 @@ src = ["src", "tests"] [tool.ruff.lint] select = ["ALL"] ignore = [ + "TC003", # no type checking blocks for stdlib "D104", # package docstrings "ANN401", # self / cls / Any annotations "BLE001", # blind exceptions diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index e22a0126..0342f4b6 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -1,7 +1,8 @@ -from .launcher import Launcher, LaunchResult +from .launcher import DEFAULT_ENV_VARS_FOR_COPY, Launcher, LaunchResult from .utils.errors import AgentFailedError, WorkerFailedError __all__ = [ # noqa: RUF022 + "DEFAULT_ENV_VARS_FOR_COPY", "Launcher", "LaunchResult", "AgentFailedError", diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 4c1c7a9b..3dba4c75 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -2,14 +2,18 @@ from __future__ import annotations -__all__ = ["LaunchResult", 
"Launcher"] +__all__ = ["DEFAULT_ENV_VARS_FOR_COPY", "LaunchResult", "Launcher"] +import fnmatch import itertools +import logging +import os import socket +import typing from dataclasses import dataclass, field from functools import partial from multiprocessing import Event, Process -from typing import TYPE_CHECKING, Generic, TypeVar +from typing import Generic, TypeVar import torch.distributed as dist from typing_extensions import ParamSpec, Self @@ -27,11 +31,16 @@ from .utils.errors import ExceptionFromWorker, WorkerFailedError from .utils.logging import LoggingServerArgs, start_logging_server -if TYPE_CHECKING: - import logging - import os - import typing - +DEFAULT_ENV_VARS_FOR_COPY = ( + "PATH", + "LD_LIBRARY", + "LIBRARY_PATH", + "PYTHON*", + "CUDA*", + "TORCH*", + "PYTORCH*", + "NCCL*", +) FunctionP = ParamSpec("FunctionP") FunctionR = TypeVar("FunctionR") @@ -52,22 +61,13 @@ class Launcher: for worker process group or ``None``. By default, NCCL if GPUs detected, else GLOO.""" timeout: int = 600 """Worker process group timeout (seconds).""" - default_env_vars: tuple[str, ...] = ( - "PATH", - "LD_LIBRARY", - "LIBRARY_PATH", - "PYTHON*", - "CUDA*", - "TORCH*", - "PYTORCH*", - "NCCL*", - ) + copy_env_vars: tuple[str, ...] = DEFAULT_ENV_VARS_FOR_COPY """Environment variables to copy from the launcher process to workers. - Supports bash pattern matching syntax.""" - extra_env_vars: tuple[str, ...] = () - """Additional user-specified environment variables to copy.""" + Supports Unix pattern matching syntax.""" + extra_env_vars: dict[str, str] | None = None + """Additional environment variables to load onto workers.""" env_file: str | os.PathLike | None = None - """Path to (e.g. 
``.env``) with additional environment variables to load onto workers.""" + """Path to a ``.env`` file, containing environment variables to load onto workers.""" propagate_exceptions: bool = True """Whether to raise specific worker exceptions or :exc:`torchrunx.WorkerFailedError`.""" @@ -88,7 +88,7 @@ def set_handler_factory( self.handler_factory = factory return self - def run( # noqa: C901, PLR0912 + def run( # noqa: C901, PLR0912, PLR0915 self, func: typing.Callable[FunctionP, FunctionR], *args: FunctionP.args, @@ -108,9 +108,27 @@ def run( # noqa: C901, PLR0912 msg = "The torch.distributed package is not available." raise RuntimeError(msg) + ### + hostnames, workers_per_host, backend = resolve_environment( self.hostnames, self.workers_per_host, self.backend, self.ssh_config_file ) + ssh_config_file = self.ssh_config_file + timeout = self.timeout + + env_vars = { + k: v + for k, v in os.environ.items() + if any(fnmatch.fnmatch(k, e) for e in self.copy_env_vars) + } + if self.extra_env_vars is not None: + env_vars.update(self.extra_env_vars) + env_file = self.env_file + + propagate_exceptions = self.propagate_exceptions + handler_factory = self.handler_factory + + ### launcher_hostname = socket.getfqdn() launcher_port = get_open_port() @@ -132,7 +150,7 @@ def run( # noqa: C901, PLR0912 worker_global_ranks=worker_global_ranks, worker_world_size=sum(workers_per_host), backend=backend, - timeout=self.timeout, + timeout=timeout, ) agent_payloads = None @@ -140,7 +158,7 @@ def run( # noqa: C901, PLR0912 # Start logging server (recieves LogRecords from agents/workers) logging_server_args = LoggingServerArgs( - handler_factory=self.handler_factory, + handler_factory=handler_factory, logging_hostname=launcher_hostname, logging_port=logging_port, hostnames=hostnames, @@ -167,11 +185,11 @@ def run( # noqa: C901, PLR0912 logger_port=logging_port, world_size=world_size, rank=i + 1, - env_vars=(self.default_env_vars + self.extra_env_vars), - env_file=self.env_file, + 
env_vars=env_vars, + env_file=env_file, ), hostname=hostname, - ssh_config_file=self.ssh_config_file, + ssh_config_file=ssh_config_file, ) # Initialize launcher-agent process group @@ -198,7 +216,7 @@ def run( # noqa: C901, PLR0912 for s in agent_statuses: for v in s.return_values: if isinstance(v, ExceptionFromWorker): - if self.propagate_exceptions: + if propagate_exceptions: raise v.exception raise WorkerFailedError from v.exception if isinstance(v, WorkerFailedError): @@ -222,7 +240,7 @@ def run( # noqa: C901, PLR0912 execute_command( command=f"kill {agent_payload.process_id}", hostname=agent_hostname, - ssh_config_file=self.ssh_config_file, + ssh_config_file=ssh_config_file, ) diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index dd6dd258..b5fd5fd2 100644 --- a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -15,7 +15,6 @@ "slurm_hosts", ] -import fnmatch import ipaddress import os import shlex @@ -102,7 +101,7 @@ def build_launch_command( logger_port: int, world_size: int, rank: int, - env_vars: tuple[str, ...], + env_vars: dict[str, str], env_file: str | os.PathLike | None, ) -> str: """Generator for command to launch torchrunx on an agent.""" @@ -110,14 +109,9 @@ def build_launch_command( commands = [] - current_dir = shlex.quote(str(Path.cwd())) - commands.append("cd " + current_dir) - - env_exports = [] - for k, v in os.environ.items(): - if any(fnmatch.fnmatch(k, e) for e in env_vars): - env_exports.append(shlex.quote(f"{k}={v}")) + commands.append(f"cd {shlex.quote(str(Path.cwd()))}") + env_exports = [shlex.quote(f"{k}={v}") for k, v in env_vars.items()] if len(env_exports) > 0: commands.append("export " + " ".join(env_exports)) diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index d94e1932..f1399411 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -26,16 +26,14 @@ from io import StringIO from logging import Handler, Logger 
from logging.handlers import SocketHandler +from multiprocessing.synchronize import Event as EventClass from pathlib import Path from socketserver import StreamRequestHandler, ThreadingTCPServer -from typing import TYPE_CHECKING, Callable, Literal +from typing import Callable, Literal import cloudpickle from typing_extensions import Self -if TYPE_CHECKING: - from multiprocessing.synchronize import Event as EventClass - ## Handler utilities diff --git a/uv.lock b/uv.lock index bf7620c5..13fe003e 100644 --- a/uv.lock +++ b/uv.lock @@ -1,4 +1,5 @@ version = 1 +revision = 1 requires-python = ">=3.9" resolution-markers = [ "python_full_version >= '3.13'", @@ -58,11 +59,11 @@ wheels = [ [[package]] name = "babel" -version = "2.17.0" +version = "2.16.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852 } +sdist = { url = "https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", size = 9348104 } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537 }, + { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, ] [[package]] From 3ad386af034f36b6b2e07cf6e5c8650e80c4fbed Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 16 Feb 2025 12:32:04 -0500 Subject: [PATCH 121/141] small edit readme --- 
README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 51b7d61f..a847f7a0 100644 --- a/README.md +++ b/README.md @@ -74,10 +74,10 @@ torch.save(trained_model.state_dict(), "output/model.pth") ``` **See examples where we fine-tune LLMs (e.g. GPT-2 on WikiText) using:** - - [Accelerate](https://torchrun.xyz/examples/accelerate.html) - - [HF Transformers](https://torchrun.xyz/examples/transformers.html) + - [Transformers](https://torchrun.xyz/examples/transformers.html) - [DeepSpeed](https://torchrun.xyz/examples/deepspeed.html) - [PyTorch Lightning](https://torchrun.xyz/examples/lightning.html) + - [Accelerate](https://torchrun.xyz/examples/accelerate.html) **Refer to our [API](https://torchrun.xyz/api.html) and [Advanced Usage Guide](https://torchrun.xyz/advanced.html) for many more capabilities!** From 120e1e8d2b91f2f751ec56891ee6f6a6aaf749f4 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 16 Feb 2025 13:06:35 -0500 Subject: [PATCH 122/141] scripts dir --- .github/workflows/main.yml | 2 +- .github/workflows/release.yml | 2 +- .gitignore | 3 ++- docs/conf.py | 10 +++++++--- docs/source/examples/accelerate.md | 2 +- .../{scripts => artifacts}/accelerate_help.txt | 0 .../examples/{scripts => artifacts}/deepspeed_help.txt | 0 .../examples/{scripts => artifacts}/lightning_help.txt | 0 .../{scripts => artifacts}/transformers_help.txt | 0 docs/source/examples/deepspeed.md | 2 +- docs/source/examples/lightning.md | 2 +- docs/source/examples/scripts/generate_help_menus.sh | 4 ---- docs/source/examples/transformers.md | 2 +- scripts/build_docs.sh | 1 + .../scripts => scripts/examples}/accelerate_train.py | 0 .../scripts => scripts/examples}/deepspeed_train.py | 0 .../scripts => scripts/examples}/lightning_train.py | 0 .../scripts => scripts/examples}/transformers_train.py | 0 scripts/generate_help_menus.sh | 4 ++++ 19 files changed, 20 insertions(+), 14 deletions(-) rename docs/source/examples/{scripts => 
artifacts}/accelerate_help.txt (100%) rename docs/source/examples/{scripts => artifacts}/deepspeed_help.txt (100%) rename docs/source/examples/{scripts => artifacts}/lightning_help.txt (100%) rename docs/source/examples/{scripts => artifacts}/transformers_help.txt (100%) delete mode 100644 docs/source/examples/scripts/generate_help_menus.sh create mode 100644 scripts/build_docs.sh rename {docs/source/examples/scripts => scripts/examples}/accelerate_train.py (100%) rename {docs/source/examples/scripts => scripts/examples}/deepspeed_train.py (100%) rename {docs/source/examples/scripts => scripts/examples}/lightning_train.py (100%) rename {docs/source/examples/scripts => scripts/examples}/transformers_train.py (100%) create mode 100644 scripts/generate_help_menus.sh diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a73121b8..46e67276 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -33,7 +33,7 @@ jobs: - uses: astral-sh/setup-uv@v5 with: version: "0.5.29" - - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - run: source ./scripts/build_docs.sh - uses: actions/upload-artifact@v4 with: name: docs-html-build diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 17298d24..82f6637a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -30,7 +30,7 @@ jobs: - uses: astral-sh/setup-uv@v5 with: version: "0.5.29" - - run: uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html + - run: source ./scripts/build_docs.sh - uses: actions/configure-pages@v5 - uses: actions/upload-pages-artifact@v3 with: diff --git a/.gitignore b/.gitignore index 952cfa2c..c358615a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,8 @@ docs/source/README.md docs/source/contributing.md 
+docs/source/examples/scripts/ + torchrunx_logs/ -.pixi/ .ruff_cache/ .vscode/ diff --git a/docs/conf.py b/docs/conf.py index 242298ae..355dd892 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,25 +1,29 @@ """Configuration file for the Sphinx documentation builder.""" from glob import glob +import os import shutil shutil.copyfile("../README.md", "source/README.md") shutil.copyfile("../CONTRIBUTING.md", "source/contributing.md") +os.makedirs("source/examples/scripts", exist_ok=True) +[shutil.copy(f, "source/examples/scripts/") for f in glob("../scripts/examples/*.py")] +html_extra_path = list(glob("source/examples/scripts/*.py")) + project = "torchrunx" -copyright = 'Apoorv Khandelwal and Peter Curtin' +copyright = 'Apoorv Khandelwal & Peter Curtin' github_username = "apoorvkh" github_repository = "torchrunx" html_theme = "furo" language = "en" -html_extra_path = list(glob("source/examples/scripts/*.py")) - extensions = [ "sphinx.ext.autodoc", "myst_parser", # support markdown "sphinx.ext.intersphinx", # link to external docs "sphinx.ext.napoleon", # for google style docstrings "sphinx.ext.linkcode", # link to github source + # sidebar "sphinx_toolbox.sidebar_links", "sphinx_toolbox.github", ] diff --git a/docs/source/examples/accelerate.md b/docs/source/examples/accelerate.md index 79ebf8eb..4c4291cc 100644 --- a/docs/source/examples/accelerate.md +++ b/docs/source/examples/accelerate.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [Accelerate](https://hugging

python accelerate_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./scripts/accelerate_help.txt + .. literalinclude:: ./artifacts/accelerate_help.txt ``` diff --git a/docs/source/examples/scripts/accelerate_help.txt b/docs/source/examples/artifacts/accelerate_help.txt similarity index 100% rename from docs/source/examples/scripts/accelerate_help.txt rename to docs/source/examples/artifacts/accelerate_help.txt diff --git a/docs/source/examples/scripts/deepspeed_help.txt b/docs/source/examples/artifacts/deepspeed_help.txt similarity index 100% rename from docs/source/examples/scripts/deepspeed_help.txt rename to docs/source/examples/artifacts/deepspeed_help.txt diff --git a/docs/source/examples/scripts/lightning_help.txt b/docs/source/examples/artifacts/lightning_help.txt similarity index 100% rename from docs/source/examples/scripts/lightning_help.txt rename to docs/source/examples/artifacts/lightning_help.txt diff --git a/docs/source/examples/scripts/transformers_help.txt b/docs/source/examples/artifacts/transformers_help.txt similarity index 100% rename from docs/source/examples/scripts/transformers_help.txt rename to docs/source/examples/artifacts/transformers_help.txt diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index 207af460..f390b47c 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deep

python deepspeed_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./scripts/deepspeed_help.txt + .. literalinclude:: ./artifacts/deepspeed_help.txt ``` diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index 21599da7..681d3183 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [PyTorch Lightning](https://

python lightning_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./scripts/lightning_help.txt + .. literalinclude:: ./artifacts/lightning_help.txt ``` diff --git a/docs/source/examples/scripts/generate_help_menus.sh b/docs/source/examples/scripts/generate_help_menus.sh deleted file mode 100644 index db639a16..00000000 --- a/docs/source/examples/scripts/generate_help_menus.sh +++ /dev/null @@ -1,4 +0,0 @@ -uv run docs/source/examples/scripts/transformers_train.py --help > docs/source/examples/scripts/transformers_help.txt -uv run docs/source/examples/scripts/deepspeed_train.py --help > docs/source/examples/scripts/deepspeed_help.txt -uv run docs/source/examples/scripts/lightning_train.py --help > docs/source/examples/scripts/lightning_help.txt -uv run docs/source/examples/scripts/accelerate_train.py --help > docs/source/examples/scripts/accelerate_help.txt diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 2e0fb9c0..607f7777 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [`transformers.Trainer`](htt

python transformers_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./scripts/transformers_help.txt + .. literalinclude:: ./artifacts/transformers_help.txt ``` diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh new file mode 100644 index 00000000..cae5bc92 --- /dev/null +++ b/scripts/build_docs.sh @@ -0,0 +1 @@ +uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html diff --git a/docs/source/examples/scripts/accelerate_train.py b/scripts/examples/accelerate_train.py similarity index 100% rename from docs/source/examples/scripts/accelerate_train.py rename to scripts/examples/accelerate_train.py diff --git a/docs/source/examples/scripts/deepspeed_train.py b/scripts/examples/deepspeed_train.py similarity index 100% rename from docs/source/examples/scripts/deepspeed_train.py rename to scripts/examples/deepspeed_train.py diff --git a/docs/source/examples/scripts/lightning_train.py b/scripts/examples/lightning_train.py similarity index 100% rename from docs/source/examples/scripts/lightning_train.py rename to scripts/examples/lightning_train.py diff --git a/docs/source/examples/scripts/transformers_train.py b/scripts/examples/transformers_train.py similarity index 100% rename from docs/source/examples/scripts/transformers_train.py rename to scripts/examples/transformers_train.py diff --git a/scripts/generate_help_menus.sh b/scripts/generate_help_menus.sh new file mode 100644 index 00000000..2b7ada5f --- /dev/null +++ b/scripts/generate_help_menus.sh @@ -0,0 +1,4 @@ +uv run scripts/examples/transformers_train.py --help > docs/source/examples/artifacts/transformers_help.txt +uv run scripts/examples/deepspeed_train.py --help > docs/source/examples/artifacts/deepspeed_help.txt +uv run scripts/examples/lightning_train.py --help > docs/source/examples/artifacts/lightning_help.txt +uv run scripts/examples/accelerate_train.py --help > docs/source/examples/artifacts/accelerate_help.txt From 
c29581973de3227e097a7f91993193f9a09ba940 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 16 Feb 2025 13:29:22 -0500 Subject: [PATCH 123/141] moved docs artifacts again --- .../artifacts/accelerate_help.txt | 0 docs/source/artifacts/cli_help.txt | 36 +++++++++++++++++++ .../artifacts/deepspeed_help.txt | 0 .../artifacts/lightning_help.txt | 0 .../artifacts/transformers_help.txt | 0 docs/source/examples/accelerate.md | 2 +- docs/source/examples/deepspeed.md | 2 +- docs/source/examples/lightning.md | 2 +- docs/source/examples/transformers.md | 2 +- docs/source/features/cli.md | 24 +++---------- scripts/generate_help_menus.sh | 12 ++++--- 11 files changed, 52 insertions(+), 28 deletions(-) rename docs/source/{examples => }/artifacts/accelerate_help.txt (100%) create mode 100644 docs/source/artifacts/cli_help.txt rename docs/source/{examples => }/artifacts/deepspeed_help.txt (100%) rename docs/source/{examples => }/artifacts/lightning_help.txt (100%) rename docs/source/{examples => }/artifacts/transformers_help.txt (100%) diff --git a/docs/source/examples/artifacts/accelerate_help.txt b/docs/source/artifacts/accelerate_help.txt similarity index 100% rename from docs/source/examples/artifacts/accelerate_help.txt rename to docs/source/artifacts/accelerate_help.txt diff --git a/docs/source/artifacts/cli_help.txt b/docs/source/artifacts/cli_help.txt new file mode 100644 index 00000000..770259d6 --- /dev/null +++ b/docs/source/artifacts/cli_help.txt @@ -0,0 +1,36 @@ +usage: -c [-h] [OPTIONS] + +For configuring the function launch environment. + +╭─ options ──────────────────────────────────────────────────────────────────╮ +│ -h, --help │ +│ show this help message and exit │ +│ --hostnames {[STR [STR ...]]}|{auto,slurm} │ +│ Nodes on which to launch the function. By default, infer from │ +│ localhost or SLURM. (default: auto) │ +│ --workers-per-host INT|{[INT [INT ...]]}|{auto} │ +│ Number of processes to run per node. By default, number of GPUs per │ +│ host. 
(default: auto) │ +│ --ssh-config-file {None}|STR|PATHLIKE │ +│ For connecting to nodes. By default, ``"~/.ssh/config"`` or │ +│ ``"/etc/ssh/ssh_config"``. (default: None) │ +│ --backend {None,nccl,gloo,mpi,ucc,auto} │ +│ `Backend │ +│

python accelerate_train.py --help

(expand) ```{eval-rst} - .. literalinclude:: ./artifacts/accelerate_help.txt + .. literalinclude:: ../artifacts/accelerate_help.txt ``` diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index f390b47c..c0b3bc64 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deep

python deepspeed_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./artifacts/deepspeed_help.txt + .. literalinclude:: ../artifacts/deepspeed_help.txt ``` diff --git a/docs/source/examples/lightning.md b/docs/source/examples/lightning.md index 681d3183..7814de2f 100644 --- a/docs/source/examples/lightning.md +++ b/docs/source/examples/lightning.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [PyTorch Lightning](https://

python lightning_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./artifacts/lightning_help.txt + .. literalinclude:: ../artifacts/lightning_help.txt ``` diff --git a/docs/source/examples/transformers.md b/docs/source/examples/transformers.md index 607f7777..097483d2 100644 --- a/docs/source/examples/transformers.md +++ b/docs/source/examples/transformers.md @@ -8,7 +8,7 @@ Here's an example script that uses `torchrunx` with [`transformers.Trainer`](htt

python transformers_train.py --help

(expand)
```{eval-rst} - .. literalinclude:: ./artifacts/transformers_help.txt + .. literalinclude:: ../artifacts/transformers_help.txt ``` diff --git a/docs/source/features/cli.md b/docs/source/features/cli.md index d8e33e73..cae7ee98 100644 --- a/docs/source/features/cli.md +++ b/docs/source/features/cli.md @@ -1,6 +1,6 @@ # CLI Integration -We can automatically populate {mod}`torchrunx.Launcher` arguments using most CLI tools (those that generate interfaces from Data Classes, e.g. [tyro](https://brentyi.github.io/tyro/)): +We can automatically populate {mod}`torchrunx.Launcher` arguments using most CLI tools, e.g. [`tyro`](https://brentyi.github.io/tyro/) or any that [generate interfaces from dataclasses](https://brentyi.github.io/tyro/goals_and_alternatives). ```python import torchrunx @@ -16,23 +16,7 @@ if __name__ == "__main__": `python ... --help` then results in: -```bash -╭─ options ─────────────────────────────────────────────╮ -│ -h, --help show this help message and exit │ -│ --hostnames {[STR [STR ...]]}|{auto,slurm} │ -│ (default: auto) │ -│ --workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ -│ (default: auto) │ -│ --ssh-config-file {None}|STR|PATH │ -│ (default: None) │ -│ --backend {None,nccl,gloo,mpi,ucc,auto} │ -│ (default: auto) │ -│ --timeout INT (default: 600) │ -│ --default-env-vars [STR [STR ...]] │ -│ (default: PATH LD_LIBRARY ...) │ -│ --extra-env-vars [STR [STR ...]] │ -│ (default: ) │ -│ --env-file {None}|STR|PATH │ -│ (default: None) │ -╰───────────────────────────────────────────────────────╯ +```{eval-rst} +.. 
literalinclude:: ../artifacts/cli_help.txt + :lines: 3- ``` diff --git a/scripts/generate_help_menus.sh b/scripts/generate_help_menus.sh index 2b7ada5f..10baf344 100644 --- a/scripts/generate_help_menus.sh +++ b/scripts/generate_help_menus.sh @@ -1,4 +1,8 @@ -uv run scripts/examples/transformers_train.py --help > docs/source/examples/artifacts/transformers_help.txt -uv run scripts/examples/deepspeed_train.py --help > docs/source/examples/artifacts/deepspeed_help.txt -uv run scripts/examples/lightning_train.py --help > docs/source/examples/artifacts/lightning_help.txt -uv run scripts/examples/accelerate_train.py --help > docs/source/examples/artifacts/accelerate_help.txt +mkdir docs/source/artifacts + +uv run --with tyro python -c "import torchrunx; import tyro; tyro.cli(torchrunx.Launcher)" --help > docs/source/artifacts/cli_help.txt + +uv run scripts/examples/transformers_train.py --help > docs/source/artifacts/transformers_help.txt +uv run scripts/examples/deepspeed_train.py --help > docs/source/artifacts/deepspeed_help.txt +uv run scripts/examples/lightning_train.py --help > docs/source/artifacts/lightning_help.txt +uv run scripts/examples/accelerate_train.py --help > docs/source/artifacts/accelerate_help.txt From 22b7db6e8e7975015d5f587e686de09dd997f427 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Thu, 20 Feb 2025 18:16:12 -0500 Subject: [PATCH 124/141] updated how it works --- README.md | 5 +++-- docs/source/how_it_works.md | 16 +++++++++++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index a847f7a0..6cdb01ea 100644 --- a/README.md +++ b/README.md @@ -112,8 +112,9 @@ torch.save(trained_model.state_dict(), "output/model.pth") 5. **Bonus features** 🎁 -> - Fine-grained, custom handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! 
-> - No need to manually set up a [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) +> - Typing for function arguments and return values. +> - Custom, fine-grained handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! +> - No need to manually set up [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) > - Automatic detection of SLURM environments. > - Start multi-node training from Python notebooks! diff --git a/docs/source/how_it_works.md b/docs/source/how_it_works.md index 7bf35cb2..c9005c15 100644 --- a/docs/source/how_it_works.md +++ b/docs/source/how_it_works.md @@ -1,11 +1,17 @@ # How It Works -If you want to (e.g.) train your model on several machines with **N** GPUs each, you should run your training function in **N** parallel processes on each machine. During training, each of these processes runs the same training code (i.e. your function) and communicate with each other (e.g. to synchronize gradients) using a [distributed process group](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group). +Suppose you want to run a script (`train.py`) on `N` machines (or "nodes") with `M` GPUs each. -Your script can call our library (via `mod:torchrunx.launch`) and specify a function to distribute. The main process running your script is henceforth known as the **launcher** process. +You'll need to start a new process for each GPU. Each process will execute your script in parallel and select its GPU based on the process rank. Your script will also form a [distributed group](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) so the processes may communicate with each other (e.g. passing tensors). -Our launcher process spawns an **agent** process (via SSH) on each machine. 
Each agent then spawns **N** processes (known as **workers**) on its machine. All workers form a process group (with the specified `mod:torchrunx.launch` `backend`) and run your function in parallel. +Normally, you'd do this by running the `torchrun --node-rank {i} ... train.py ...` command on every machine. In short, you'll end up with a topology like: -**Agent–Worker Communication.** Our agents poll their workers every second and time-out if unresponsive for 5 seconds. Upon polling, our agents receive `None` (if the worker is still running) or a [RunProcsResult](https://pytorch.org/docs/stable/elastic/multiprocessing.html#torch.distributed.elastic.multiprocessing.api.RunProcsResult), indicating that the workers have either completed (providing an object returned from or the exception raised by our function) or failed (e.g. due to segmentation fault or OS signal). +> -**Launcher–Agent Communication.** The launcher and agents form a distributed group (with the CPU-based [GLOO backend](https://pytorch.org/docs/stable/distributed.html#backends)) for the communication purposes of our library. Our agents synchronize their own "statuses" with each other and the launcher. An agent's status can include whether it is running/failed/completed and the result of the function. If the launcher or any agent fails to synchronize, all raise a `mod:torchrunx.AgentFailedError` and terminate. If any worker fails or raises an exception, the launcher raises a `mod:torchrunx.WorkerFailedError` or that exception and terminates along with all the agents. If all agents succeed, the launcher returns the objects returned by each worker. +As a side effect of this structure, every process will run until (1) script completion or (2) another process stops communicating (e.g. if killed by the system for abnormal reasons). The status of other processes is not actively communicated: so if some process is indeed killed, it would take 10 minutes (by default) for the remaining processes to time-out. 
Also, since this approach parallelizes the entire script, we can't catch and handle these system-level issues as exceptions. + +`torchrunx` offers a functional interface, with a launcher–worker topology, instead. + +> + +{func}`torchrunx.Launcher.run` runs in the current, *launcher* process. It uses SSH to start an *agent* process on every node (specified in `hostnames`), which in turn spawn `M` *worker* processes. The workers form a distributed process group and each executes `func(*args, **kwargs)` in parallel. Once all workers are finished, all of their returned values are propagated to the initial launcher process. Our agents constantly communicate (over their own GLOO-backend distributed group), so any agent or worker failures are immediately propagated, and all launched processes are terminated. Worker exceptions and system failures are propagated to and raised by {func}`torchrunx.Launcher.run`. From eb1892a33966760264c02f6f283268fba5ffcd4d Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 21 Feb 2025 17:48:37 -0500 Subject: [PATCH 125/141] how it works --- docs/source/artifacts/torchrun.png | Bin 0 -> 103577 bytes docs/source/artifacts/torchrunx.excalidraw | 1172 ++++++++++++++++++++ docs/source/artifacts/torchrunx.png | Bin 0 -> 114479 bytes docs/source/how_it_works.md | 4 +- 4 files changed, 1174 insertions(+), 2 deletions(-) create mode 100644 docs/source/artifacts/torchrun.png create mode 100644 docs/source/artifacts/torchrunx.excalidraw create mode 100644 docs/source/artifacts/torchrunx.png diff --git a/docs/source/artifacts/torchrun.png b/docs/source/artifacts/torchrun.png new file mode 100644 index 0000000000000000000000000000000000000000..a65a978faad1e597e1c1e50035336575f9142a72 GIT binary patch literal 103577 zcmZ_$1yoyI)HMpDB|w4TrN!MT6pFjMyA?@sch?kmD{jTzU5XZWx8m;ZH}v`5|G(cD z?;YcWgyih(v)7)>=2|Bq3UcBoNFR})prBABB}9~5Oo}e9+ z#D$>BNAdR||A>GzB)`kbLeWBA1E8RT%%I@jPJui=LLN|1uvyShu#jiyxA(GO{^wJ; zz%1DRc?})+cH%P2pd=KO0FQ96PzmgsE1zRTE7+BZb$Q2;e*a4Z}m(qOh> 
zr)$n?QRZN>=+)J52IryMcTwKYR08mdiogL*llipnBWo9*Yx8^$6WAff;S;$#++jy2 zr@Y&eiJPY2kVem>wq2g`*E;sF`tlhJ<#jC4ry755MvPLG z$CR9C+Lw@k_o0tEs^6r)O?0Ef!GNK#g#1`yqhbH=FZJ~`_^4v0qST})i0w|H>Pjfe zrN5~TCYW0>+b~s&wLWW-Yo^h|cOlZ<{gGZC=KQ{$GAhj)(2H1*e8S_jyUkTHxJSyL zgzkoNJK;6zH01cW9KT!ISTz`oL8?clOKYpMU(J89oG#wO*-`dnr+?{ZU}1}^AxQoz z35r!Ah$*gy8OW0`?2VzlY@B4hZOS(%O}{MZv6I_%FLmUb!-^>n@!Zf4KCsnw*tI)r z`qSNCoU{|!5~;%{f;qR%tU@SU2UJXQ4RI`w+tlGx!51dT?wJD-_BuJH2Prj@rG?Uk z78SONcqR_zDKV-{qi+YZ8g(0L=xGPdeslc;fZ-DpK=%nZFG=(&joU;0zqi}h2KdKm zXR7&1_Mz{njMe8>ezAU8w|#>73I;AEx$_UIaFM){%R$oe(uc(pky&&>$?0Ju-<`+^ zh^YJf(DG9Ke5-u;6Ra1F)$lpKQBzQ)Sal3qY5O?boAz>Q^5jMDrtRS2?p{>XCXC8| zxohs1&Tv>cDPYG>h=S8_F>hNoQ_3{oSFzBkxeRqIaw=oWocUonSeAP|d}_B+o;1ZV zUAmS|S>e`K_Hcwux{cxm5rRFJ1GcK^iwqJ}IM^qki39>Q9a<1f)VY=5pY zaEVP=9#n5r@#E^qj7zIwrzwEp9lPKEC||pXZ2o^Hsb;ZH_4Ow^G7%_fV4%1 z@1TPM49ECVN&6zm)NqQbs!C1$s{xtsfsk*6^1~siWHBP}Y6lT|)nB)`FUdzru!{de zQIs>C8T$~uC@vdjedw;BWK)#i_6M!@2&B0on5a<*${eE5KFQN6Z#+y%7)AQbNVnQs z^)Sg4kd)v?kLkcb;4m|`U6yBD-AZl2AX_t8#-r1sT7QsIQg*PL;neu3EdHp5r`}O= zQ2#JC2VzQwAKIcx4VGS5apP%fy0DXcp~{{OMjrL-_4eNzRX1|gjl(-X$(U&*tU2#y zKc|ma+lnFjYZw38l$d_5#CkvG;D)DGbb<&7D8t23hy<$Zvit&;`QY>cb zFM3*WRWaTxx8f)?C2~>HCz46G+?jHx9Id?_$f|8Z@8QfbAy(@v<}X%Ezm3!O+_Zio zDl(>uWG#^|#D9I@y86Q>j3{NsNrTX+kPy*p$eERe-bDT;bxQI|(=iNpmu{Vh%fBgH z+D*Sso5&OU$9=c>#8gbl`CB?iisY0e-aEpiR@d)$5Zx(jHhP3Kaajm695$_+3GWdF zl;<;^6TlL{Z(SJsmqr z50!ADu2OcDaL%_R5;v(N(&jdGc6J?5XW1ehKJGS@&+uru-I=0L>{e~eD)Owvz*?nW z#^|Rp>am=~GN5lYdE6(qj@q4H$f%dH8n_whW7;J%#?xJxLe84f*x!h}RNTHCA2*+y zuKcXdC%4N%)GNJ%Db7GDG;1(K<)L${eP3_&eZZyzZ@c5AIDt`H&4tp%{BFPClU*ET zk%18B^s;`4JNJ*U|6BvK;5Xeq%IAccQ}d3a5-Q^vp{-$l*a~|tsD?Ra^P_O`1 zC7BJoSM$BL(P&~t!sKAJd57)^kvE;}$ydH{bOAnoawu>#5o!v*bCchKlmT%*0=M2}G5XRaqfP;~k#cwCwnV8TUo3$tp6@43`W-pYz_j za(Eh+V3dD8)ixYXPy0UOWwN-TWJUb}S-c7NIz2~jj|T~jG%|)M6ameT)THYCrulWk zXSP-UxM7S9mNEPAogN_%F>#ST&{yaW(~kikZq0g6&OChwSqA7FO4*!1Z5y_k50(oW z$fy_sodLcX38HzIJBq6%6|s6MTXv!!JUBaWH_Mz4+7l*9M|^E z>yMhXM*<0G{SYke%jOu04h7OcVC{EylRdSxRmoZ-RkRbl}!2Kzjo#w+V 
zKvxhPT&qBq?I`}B*9qqoJ#F<|L6GCAbw1BiCB|f339gm^_J45klOygZ^%WcCo5QKm zbu7ML#EYUk&RLo_dy8aTgE|+7WgYj`+2<2hTn-re^xb7D6GU#{MFg^v5d}!5hdW{0 zng+cdsh=MXi|kMJw5Mm8S_Rg-1X*DKzP`5rmG8zs-CkcBl%9kW@TwEzManX~5``YG z&h>-~5&uOE#KegBuR9vrfru1%|H5N=Uf;rWSV*9M{7{-eDM+%7a)#CoK`_9 zYc=GrMR0`}5sz;(KY_FW*O4EI|r7_<2aLf(q~MhxW2FU!;i7n-8Z0muwnZ zDhdjePdZ-mQu56TNMQa?Ac^BgmDpxT7a;a4{XHle{+&3`^c{>5id{)yI8 zFm^9GoK_&j!87>to_%ESe6*r{y;k=q$W@c)lO##!g3ri`2Echn3mk!QmAV)j=6UHJ zFXd@HkJ;Xk_@6=j2D9xB-`3z=j-CXTX8L?&h)>oa%8jx zr+Ih<=!pL(U?iaMyJ+)>z+Z3ZL6Y1EGMn%8o27|_NN7ZW_+}%F=KCV+5m!81ly}%ayn`?lg zto&*e=jGG*y|edB8>7<*Wx_L4;3tmMW=nW1!HT}4#6n8L)Gc=e;X86jPz__Rkt$yp zDjXD^BhOcbErd9ylN&tlNgyB%S;h>2eE6q9Ca(7jXEGk%4JYY;oBvz-?_`BdU)8tF~1rIqkna+ z^oj%kd<8BsgFkR6L~!onx2)$>bkzToIYjJmRDS7A9*)u%77zM4o9pSKu#i3Hgeg)i zE3`7UxL7Iwyd^ z&3mLCLW*}Cuj?UG69OW5Z~F}(hB>u*=vlqnJn)-3a)k;3C<=5kAwC`VNFSeWKIw)% zE4_1E?0_if*4pM=P8)mA&FsU@$VJpoGS1|jzR2F)B2jU;hydR(0gbtG^Q)iC7r&G| z<%iJlD>L5*p#O_l11|{iz#6rL8lF zvGB)IOIKo=lYA~whx)%Ii=Jy%d2>%IwM6%OC`Y=`g zA=Pi&a72>G%1_`KE|wI7nv+HG`-9u5l7`b<6ta4%FGL+=q1ZHv9n1A?!ah1*3^>1G z-7k>};)H6;;Ot*+w1->(y#Jx!6dmT1L;d3AGv?Aw!zRNd9_xV<%n!2)j9d4W!b&$l z_#x}0wW-I8H8*7ESP`I?u~Gu;ed`sgtWp^?_d5ORnMkZxFQ6(7PoPXyvJWNT{_vt*BSEjo7g}K!9_n zaBA?0I~`p?-p(Kj*CckDcsYZQRfx`WLllX?M|>3=32T+qqSLMpym?O`bzjlGNPe9= zxkClyax5%c4^S$+zt0tY1JoHjB+4qGZ}!UO|2-Wfm0jv)-TUc)u4HXS&BXGod5`<@ z_74{t4#>6bBn7+L(BdjcdB3CNyRd;G<|uQ9OUUWBxp90WGxi?<006S|bt799I9g=n z<5WS7(^YX*1SGbMe}2m8c15M$t!AX-ylz@I$9Mo^s8FL}fz#99(hb%FHBt_Rwbe3uj)kjhD^|j@ zIGPRl^9_&9-1hH_JdjbxhkK(YjfV7;9HcHy_0I)0$OvG^nnQ~THyd(o+3#lI5zS=9 zIv;(KQ%Xw;=aT@`as|)*QdUc|NzFLpUNjF+4zv(}%rg1*fU{Fh=BO#HUu(X{`P8-G%vQIR{ z!i_svSEuIi6EOd$y?QVqis!QKK4^N?BJzwLy*iZ^RTocD$c$Uhr%a&{uG!h ziu2Mibv&L&tfUCRu2n0uGy$u9jFc7bT<%!A-fVAOK1A8mUTKL2h{`I%D_^zz9bwm< zLG}>^(W%EOspaFfIGee>-lGwu@vRUOpnY!Zn8>bj%s+0XU)N?Cj8;6s3L+e${l6D z5SlOc+1}n*>MojutsH-;NK-)|;iNgCveKxeG-DE+>0_;uUoeDkUZN(}CCjcjD2%39 zyL4~Y+w5dMakOwf?JFHYnlrJ`v3?qB!^pobCNrtqVPMw3*|=LXq0@1B;- zmZ;(#ZoADjM$SyO*%>m3ZZyo``WWGIbRI%!i!!IUd?>yP_` 
zi1{atO`0)??(RbR%R{VmF)Q9G5a`r-&dmq=wlBafxWVOiOMm=;x!$$p7I7werr67T z-ExAQBhKl;E2ruQ&1phL>cCJ$bvswazkE5Ci7gFjTsCj~_)9SqCGlIVT z$k1?F{%-+{ATQLSN>{fPy#Gf60w|38NI-KXuv%z)wGC^sp@OH3!|~5&78Vs|?W}RR z^iuj&&cuWT%;@OZvg1Y5%ZNf|Hzz{~)PH8LgPMz5P*<0Q2s;lc@d&c`mE;7nwK}<_ZHM9&US= zl5BGTibT26{7>V_{!i_!db-byh(y9qSu%X|7N1s9`W*4UuX60sQvCV>YhF+(@bInM zq*jXhrx2K8Af)J-hJBaE&3Jt_4aGaqfD>V{l^E}dk#JXi_M5SB@TsU5@ZJ+sBXmqA4 zYR#1C!Zd`$QWO}gg=CnhA(=Z)cCG}lwJeLATtg9G@Ey?A#2ZR8>P_0N=W>7cLloPD}OAyS!aHmh?}~uKC{iQ3Eiwghp?pu#QVZENXSi6-U5NP@nB6I`3u@DQ z6El=ZKhEwQ_7*LQ4Cffj^%4jHdai<2*AItcJ)M5euXl!C*_HzsZk)z{fWjIgbF}?Z z#BGi9Gxbw!`-K&JI@~PKHN$l?47(~Rz&^_((IJ(P|( z&bTxbXJ%efR^^Qcbnsfx1&p42jdZ7S{(>|Z7N4ZvMs{dIfz>g;hK8W_002eDV#NrY zW?uU*(6VT9{Fud718l2fO{e$_2%)&r6;T^NC;Y7ZnOmL}VGuQ4rIVy=UVsxye;+#a<0cx*MTs-6?aAt~I}#Zs-nMk&p8{zTScGcXqCWnc zFDAoTlc1p^v1z$<$GSp^5*}M$q%{wDJET1MjohdMgJT_wKJX+MRmToI+TyMWmcsdi z7?!32=mmmrj`o!XgWroee-_C`4kc>d=$x zuq#I+?FvDm9Qoi}h5EdIa8gf`OL@(sUC&@lwj4}Gjp&S`{JwX;~*iC=o1nUouux2Bayc3Rl&ERxhya>t8w{w*Lf!* zZX*XxuK7^$eo^L049O1Oj7l0zgQVz|plC?+kCExx6}&e$28w+;tcbs#Rwj?9k9Qhj z(`p&xBg2q>WI63vQ+wAdlOYqC!7D4OvAxxLms6r^kp0V5!*jdsKA$FDFqASetIqq; zs{8i~_`R?JIJ$t$Tz&mvZ6t@3Ge)61&!I~zISiLE9*L|>n_V~J_f&zFZzSpODWN^; z&|FC7^cKxQp?9Lmo}%q^@pcSIiRY)w3@2d=@tz&r&}xyW^$ zS_wJ0*zcpi0zFRiej+m}2=&A=myjx3%AP!3%DWB9m4@vM$HaZm)j$HRy5_o@F0xYG zE|_dz?|m7orQ%S#e~66i5lQY)I?7U$&89|DwOkVG&Ge>yCG3?;AH@76HlwTG^4l{a z34!!Y)Nco7XDsunH|k(7qrSF%J>8LjyGoFV4N+eulCo~oyDJ*IIL-NbWNG-`y zo^8sqQ)?Q#cY_g0)uvg2=84irRY2avo=nazdhNEsfJct3`=Cg$ES& zXoqWaO`qXklJ(^x=XX^^eUT{^dkTZM$6x5dzIcSzdrS32t>n@XVYz>3ly#o5hequ) zU2_(V^BgBg-A0<+Y6hfePsDrv{#I%7U{65a#-d9$WNaA@Px(5cDh|6gGp#}+KZ-!K z*JW3m)@iGFlNx{dhpfjVwK`Av$aoI3@|{|kEB2&wMoQ;4|WmiZ){^bJza#ysomHCZB)U zy_#w{s`FH+L)$mhlY!^eaYZ!ENUJ)U)MIVTYJTHp&xuDD;F`R^5b!Ei^JDE-FStMU zj7CJ?`gwH^e@;-(2t3^_ox$ch+|uU~?}TS4xzrpk`(UEo{>2rc)|`O5OR~LYycDb2 zzH@gEP#Jk%t+q6>!#o?jL9DA14@EOzPVEe0&s+avZoujFscG@lY&36g%Uu~ z#ccKuNs59JEmAeu7-Z4-*Wt9?a)Uxwgq9=yFZrJoctkcpM>)YCQZcH$JZ{omq~I0q 
zLa3BnEA(g!92DleBs5^~I5{09?ht6#Y9;fAh7BL z!$UCIXts2FyI{-d9Mw!keOS8wC-gv2T$l7bM{TH#xrn5(6}?{yn(HTR!h6B2D!59C zs;ZvoHPKFCk{%pFK);-TeuEsX3_byOisid_*IsO`^EA%)`L9yyW1isx{cWDr zS7>qtqu?2GY1LI@t5&abt&;XdQXGSEC2(636Gl=C2!KQLv<~)IhnHmWGD>)A;)>)1 z6wnr)QPmuw|J9`7fY2Jwt}dSueqAwh_q${{xThJY4|97xTijXQxap$TlQ)~}y-#7g zqHKaIogAkbL^l%&61(83P>U((TP`>mT$9#za8YXfn4dYH-!UjXR%)v@SLJBGua_v@di-_pVV7T$-!T~9=N?%!-J_f&UdcSW zl7m*W%AY_hj%1NWuSRTl+e(K?8@X4p4mr7kqRa^oeR_ep%1Nk1KdoZ3`*!+gpN^OeC5zcvE)z%h^pKRLn!SpI98^Tdw zRB$a-%yP&}gro%U_i3SF)ix;TB6Vn*~nJ1l;vJQ)|80SAuB3$@Ygs2?7FYM>@=si-xtwL#wdsU>2s$F8QJke?kHCn zsYt4rNFXEO7qfvcCdCHnT#CQ@iIlf9cvV@{)08u!{eB8pGoDpB(IMG)W79mNOo4Y!=S#|Y{+5z?KUUt5%{aOp29oi z+ZaOts(&Wpuv{;?Ko;j)X(O!`wva)dNN94A+}GHGMiIkg_**!1?pMUBwNzGdymU4K zX$+Y;km%LHe#w~|g*uczf&vO;WfVff|K_OS=%G1EQ0BA2-=IZ&(|yQ%1souCoakzXEY+5R=lrZCD42V8qn334Mge^ zy#4VfBM~8EsJ~T_{Qx4aGgN3#_O@>Hx_<+SfeP3%kFW;#B*Nb?$Im}ARU*tIs&$z8EQo3>$KND3S|2?p)atoE=jQSW)G*gZuvz?O6O+-NsOVdPE!t{D{uT2`Pp^Ujx0AAs<- znh0Vjc0k_dXgBjUdV5I)!~$Y^{<$!g#OmXIL(3vgZ@0=qx*?~qpD z{%ImaAb{^MsPNK#{%%j&O3u`QTCwf?4aWu~fgqX}p6ERfVoZgsQk1QAu;stYYE+M`u{ahMUec>29iGQS-$7og7@1Y^QABv>Ho;^=7oHtFclD| zyorcN9hrg=13QZxfl=N=p^@CeLl?J8v$E8IPtHf<=YL7EIKFw~^W>sPzJWH&8>aGO zSteS!zkoD`ZDT{a%>ZB4@n1-ZT){T-u}b>52~4>nUn1cPW)>4C7&M5K^q@v(l`LwXxf`x*K!-g+Hc;F4GY z4Laxt=;iwF?>x%4E5z(V*(&Ds0{n(hN8%r=@7bWEtaee*O87bw5z`U=NBoeQ8=0@w zF07@%vjjdw3Z|8w#U*DXTgu&jqrL*Va6Achw9c@l$mo-A03$X9xXwExL<|G5;GV#~ z!ZS~KC3_~mkOnsD&Udv7J?9Dl15K$KMR8%`aSoU>l4%aTKmU3Fpk<)s@?c$UNmAA@ zeU;5Rh=e+@A#FPy1kH)Zjk~eT{%F~W1n+B6VS>p1Rv8cX#8!1H3UR%y5DpP~9soZe zAS}aAo`vC^__$sPFOqaVfE;XK2~EP2CegGI7~3H9AM2Qp>B1$^d{hfJJFL`rm=wHqZ(FMU2CDAeGe;k&!lb;KoKQ zMe>jGl~h24Un>+g^dZUm9TeU1UbXWY6DiyG)8u!IKb!q|7#Nc>5jl{m9K?v6)vlJ* z57#g!5HtPTMgalfXWkuaXcAx`M8KrJBm6L?pPyy0AOg1R^vrJ|j6;IQ5#swozXt%Q zkU@0(p9z5={I?9maQ)_ah+dgPEDY4`^YjXbsq=x@@Pj{J>vRZ_Nc_PH%2Q&`#I}xb zJn?m|ES`=4MWlbm3k-Dv`___221KNmB9?+re)5BO=rrrOQ>VODup{~(w(H>?x94877#5bU`;x>Efu`@FIlOG% zGi+TGLNbolewIe_i^{K`c@u?>uF}$oJOwm(w99&QXU{FY)UiDD219|>k7Czkj 
zs3vV&3C|-XwGY^aHechMaZiE*l8pL8K0;>njE2m}$3guvPBBIh1`+E=HskN2@Sqjv z)L2N?7C{>{OgwH1V@tbYYE1plBlt-PuzQXD0m-j=KVMYX&cx5G<}f!L}n* z#PsOtKexL<+8q?YniGFw|E%J}efvF!A3q_c3(>;jFr>*yna)nGD>7j&8Psgx#^2@I z{m*#O?_u;v|27#9ORMI5{Ljw6k}H_qWyq1$&}(a_SNW}K^7m6^|1d>es3@(*?`Qx% zJa=c4F;i=6lU zJ>jKbu;gn^CJhC&Eoy@m00u+BWWY)Jyr!oe&MKZq@heRSB4#9M@txRu9m}YxCBy@J zduly1BdZa_#C{y;v?PZ6!?4|?IYd?`ieX_Unr$!D$K=X>QhNTWO-9aFUs?&|`sz*n zpIH#enk>B1G`ko=GBp}i<5Kx)ww173g4!#%g2Anr`HPTYakp?>V!@l3VRs?1BW(7& zP0otjV{k&?dgSMiGv`=H>fNn<&6U+ZLkGh^UQrBQipQzT-5lkrOdm+6h39b@?hf`D z8R3C%%>t&MQ&q9#*T_c}Z8txb{2Bd@iJ!@GsVXEa8YPkx=oGDj#3o0cIb628JwW9l zdn`}8TU0RIg~q2K#O3^J+GUM`jcbw0?M|kEr=#HPdizVaf8?9V7LW#>$`FO;mGq8Hb?X2qQlkN&T*} zt3DZ+m5f6QG4sfNx3}M)K{TON4h3q>hF}AHj;xec9y%inrWR^mO@yyCBV zFrj}}6a}6ooth~RM7jT0CnyG-MQXMKA@uT|&E<*0L3OiaaB!|SO zOT%BNjLOseee)!9?hH>Tp>QoC{q{UIb^w^@8i&U@X7 zQ&0QC@=O764wJssV}|q2Y4+VXhnLdr)C_9X@zDdRa2Kd90mb&uNNAEtQipZB_YS6( zdN$C_7Llr|E>V$-#z%+u5-)wp~^D>PY6}Yd@<;i_%uS6|YZ!fk6IK={j`ONQDH6>5}5oYVg7Q|Vxqi8NQ#MpV%2V#NO<95Tcy=F#M*l8 zq}eSj)vFI%U1bx0vN+-6`HG7kHq(A2cW{qM#n$6#k>jaeX3fh*aph{^dmN$FtYEHx zzML2kf7t$IHyAODQjan}*r&COJLq=Iec~u?dPV8%EXv~%fp7QTwL7V=q-2d^z4Yhq zC;L$nENDne;pfXfL3J7uYYO=yz2Xzf1u@=Kc8%2Sl|wqLo_SiHVQ_r7}b%_b5E|P zG$MOGOe%;NX<90Ci=lid<>$VEK3-qT$ko50pIYNMG-F_60f!Bo%`QgmUmjo9MxGxB z@^ph1mq+xccG8X=R}j@D;Ag{$PV=@A)Rl!?@!QzX;mvVV4;a1fgadK(EXrBSHvyX& z$~O6X;}Y2gRp>)fA4y?Nj7=uiPIXF=>6jwYNg6_>ayo9>I``rxyJHtl;tN=)J6fX& zAGYzlEO%oD=SRt=ZCvPo`8+?wYiVhUso!)&Tl3g8>z>%guyjkl>OHV{RquZyR0o4U z%z6V>Y&R07d9!PDk9NSfQ$L0oQa)f_-jK{K_*)OG%1yP$&zd@}oF^U5rP)g6VktVq z>P2$!@)_xsf1L3XXnS)(xM+-ok-nt#R-O!7ZC4l}+ak+l2kc~aHOk!1)4A_Zr-hmw zvkRGxiuG~f=aquIP!W+y$y#mnRfolTSbN%CBora!a&~D!=B%>5n<(PpsxE{3s~7EB zW6agO2CwynF@t3%RC3f>sL!je%galPkX*swK7`?p#=$VuSMSo5Pdf_i7J>A3T$h$q z%({lCOd3TgE`(x#pD@X!37wK8;q_2D!d7p5(9Eq+bAor+`4AaNF!Co(j_YV7a_KV~ z{&-{i54iewQm>C|A@6h5(#>`75le`X_GiLs`MkEse+ja<(lQ?G2QC(4=K)Z2te6MQ z%WinX)R5>%Md@j#Vq;qV`qjJ3I14$`Jy`COWO9GaS&XWzC)Os6hjjY0*0Mq3VdY3Ty(ch41T 
z!kw9m%nJ`vY(NQH08sZ-DQi~T5-RK19*folRz!1;6LI~=X6IGit?Be0GgzgEy{@1b zH6=}{SIuWWUZ0163nTQC`dJ#xdk*8o?^EIMw;vB{8VXA-?@xQE*oMXwWlY15){yB| ze{Fncj9X(V&JV5(i;0C)w`6qOrUhez{zb19(Uo! zjnU8&?_Nat+P+*R00IGnbk?yRo+C1&73}|9KCG4#xHeJ7Yr9ANVm$Df^DOji-A6Y6 zG$)2VNUYX6L+ehC*Vy^^i7$^Tsckrse`d!Z)xN%{BJJfTBbF))ZwqTu#^Ys&!>%6GdQP^1e)w-UZ z3XwGU>2W-ha4{~hWv;|^=-0cs7p%{~rPpkKN*=XI$K+Cp``tbcgI;i#24O;^Aq~KV z6om)^sqKZpNkhZp=ies7%qD4qRW7SV784V78UANZo8u%>K4*e$p8HA>UJt=p)hxun zTx`hJ^HZre?lzAH#LqK*20?lti`hNRobYz15+}@w+Abj^>`)*E4!@f`uP8i`kEL#I zR}^1P<&@l%a;E2`T#Zc^VN3YFrY5=P`OoZ?X4L_b#01U;%k054eA=fIZrgT=|&5Nz%AS_lN!O8{v4)3vZo?%RxkHGji!y5(YHAf_Ew`_Jlsm;e_mR z*rCjM8X9ND{Aa%3ku36l4l#Zaansh*%qnM}$iToeOA!^__?=-ox{~P(VRtF9Na`&U zPxHzC0GSQpIGddlq!M#P!>&Ttyr-lZESZJkVyC0|yr^HAxhQ$qu-HoPCL64jjVjs_ zu?z&`IgQ>WJl|7sVqg|UC}32R?n?-IzLN;2Q$2E#CB`q)R(@+Qou){NK5!n@X*G?B z@mlkWRs3~l>Gtq>&})x`Hicc@2suA;D3PFuQ|JHWj(*=M6i$S>L3t1w?R*1dbkj;v z!V$Cx2+K?{(4P>jju$M;iQP|TzE!p{knAz|-%8eyl7S`!zW;L9*0$eqM{%Bd_NfV#pjnNVy$pu4ts>Ee?!jh9Z+vcB6Ma~dZ&lGw zY673UZcB3VxiGr_c{*BVPf*I9H!7I5^qb|*PSr)O`94I4${W|bILOW|j4T$K)_ZX| z<*zF+B`77}ogJX!9PITTE)X0xc8v1T<$7lNpcl93n0@Vo;%@rx;sy&vuOnT+5n>R< zS2_*UG#|bMCQZ@=LCck{K}=DNOqEi-vhO-xby|1pL2Ay`SaM*dj?yZY;7ckT0+gIU4Ka@oHgPjZd3TgSPaNOze!6t?|fRUlPtHoWvy_Mw~bl@_*@ zHdfi5nQD0C+8mcv#ra+N8~#_x()Sw*cPA~yONb3T{L7!{1Q*9L%4*%8opM(i))rYf zxm?N}{Ous-O9$lMY^PwU&va)7?t&?>zx-YYX*wMAd3L(LIFcTi(^vNUU^4di>BS?V zNxf8Gp3@(NE`Xzwlwo&-J*siV3?%j=*>1_Io<$;2R=yz3Br|VVv&PXZh8!az0AYJ= zIQVyfWJu_SB@4&x>D*h%+pf;bW1V5SZ(|fff8UDCBe?rB{>SLlLO~A==E0}%S;iGB zT`TuESh&9Hzu%Hq(at+@05OSNn4HVR`%#NYaKJaZnE ztn8jfGGsNpYMxX61?Uuofn2PaH!?kY9Y>I z_Z>h-ZqRF#&-2P7n(6gb6cHYSNw{RWMSTZ*Xnt$Lo$m2~&dq3|=YntM;(kl-K)u}r z4YXz#G{_h3itMsMVQI*s&;*hWuog3KamHe{%{q{fs|OWhq2?VTiO2dS>4WrK(zU9b z=*(zn;y+s?#@x)P$CD@S+IhKEv=1iAmbWx;FadQOULW-_v{#o#5UEyJGOmwm+|SYY z|8UUVaws(!1Psbg93Vq(a1nme1SX1z9dbHc)g$BVSu*#61_Nj0f16|uG&;;Ny1hT0 zt61D(QP%)#a`#c_`m|9p&d%wC>0GuE)&btJBE3=Bg3?)&S0h%t40QNo6jlu{t{u#F 
zC!on3G3x3g?RiMfW-fb(ibY6jk~c(BQP)Gq+{Lz=G7kzb16ICUU1Sa~b9HYxAURo> z#jY~cK`!D#XQrz-ll)cHRpj7_uzt7)AD19zsoUe1RALv5VyF4Hvg^BjgIyi{6n4W! zSW+a2`_ef1t)YScRF`)1i85Z`##8)`pxWbW_4B|3zg+VVLy=EH$-gdHnq@VYj#{1J zuSL416_VHX*L8HL?Sqq!+S*31KiBZ#xvFzmCnZ?UWuB=oU~tp=+>FtJalUM9yW%U6 z>Pp|t8Cw$94{i_}$BKJz=6Q`}yg=pk*5)ZMWs*-d?3|Xm5H>gLWwnk z!V$-3HZt0d*+iVvmk(9Yb3I6rT1Jh}?T^e0w5U+cd&SHCWlrab;sjLVDHW;mUv}3^ zGwyh*9$hMQaPPT0|B;eN*@WZuP;_V(x);@b3>$>a9x|t|Z$V1#dG*Jf!0TaND{7L> zBHXaSe$cVzugv%M!;lb?$OOZtG##bu%vXi!tpTO_>gz)*JW!sv`=v&+YD8USwc#ua zzTl7LhE*0?=i}IR@5T1#ue~zKIkZBvIZW-g3AjTCQa%p@8CaS}v^U2!9wv}m6gI;$ zzK%#>+n>88B8$eYtJN>Sn~Oj?z>==}bsb|q&NkKH#K!+Rzi2wEGTL_CLWYljFDiOr zlh5cWtEmcrj~~GK<~f>~7d(+$Ce7zmOJC<%ti1RBKMFSv^Dl)8{vfUAl(=MTCB}NC zKD9Fk7NEa74AN-Si}7(|$;DVAur_qv;;mzbXER3du97(zoA#|^Mr;1P`4?YzO6P4$ zV;VS(g-uTNO^g19lOY5{BH>+cdu*=;qnKK?7&SVI;nQQQF7?ClO3wxT-9=9 zz+dy%n^ucGlu0*sc}T|O7~?RH9W>j#(?QDQgF3&@&U9*IL8{q4f9)Inj4F#O4fk4e z;8lb#%{0jBymewX0s`mB9u{I@c*XA4;P8tb4X1h$X|8wa#YLoKA|HN?BnDc-9Wj9_ z5mDWuA_OFa{GXtQ>&~)XJ4XNKw*XLj=lpl{MvXMBY+p4bt4)qHa_6^QQ|RHvOxN1; ztpqnoBo@BT#o=1Gr6~+Iwli&f$LDpkFGKvXJgXOvc5chaP-2u960{+bU!`n}PMgS- zgpUapqTOB(O88|_9<$p!l(aMyiDeMcMb(!{1WMYMmaJ4ih zUxcIc2fZqzAbO+iA~$0&RuINIkrZ{5&JldgJThZ+90S=GW&5h6hvPM-F_bD5rbqqP zSkOJg{;alz7voVF1AY;%0P-!$NWj1P5-Wk9NL9M~)QtBL8*&L2C(Xn6U9_u}QLtmT zzvg7l)~~Oy!x9-K08qeujMUdF$UU2N6GU>VqxZ(md`E2q`E}AC9s}IIUp*7z@G
EhC#Wlu+#rk(|g)%>T{KTG(o4r>hYG_&VLkL)ii zU>V~bGS+UMX}tD77;47t)KjN0+cvK^vAM6s>nSzn1*x%`k5XyFxt#~ZaNiRzG>0}U83HxEmVjldX81531?Xm*Xe1^szlQAyce)M*9|)aZ(578*8n-_Fm8P zoZV23bA7V6l3(ZIs>}1R2JR$hR7mIoQ+hr{LSN4zw&!a_P=kqtXbOh$ify)8=NZ?F zM$Rb!1j?p<$*OkAQbS3HE?FXtzkgN_nc7tyCCeEDYY&Rvp;bZ>q>k zx-@F$S6hsoh8LO~!Jdi~$)Mq+mWf}VZ;Gam2m|O!I4g7HUaw9?g6y|P_SNc*V-IZc z_zKWcML%{A(uLyGm&lXr(;$MjsZR`^bxPooP2*W;}!7bM4ZTl%o`Ka8k^w=VLS^+vK??bNsoEQcg)tgu826 zQ)qkNyicF>B(17lJDnp?^@fe{n5`Wp1>pZM7vKFnIit{f9ym#!(RgJv+!hwZR6OQ+ z>e_8KpMRCkI~Wnh%4QAZO}&rj?fhakF<%#0AZ;p`N~Z5?ZJKiCW;oUTIQ_&pBj(Ch zym(qGv@NN)$X9%b#Ts8Pcdm|35cqr6ow&!U4)vJBauq?l@BAwD1U07^65$kfX8tp~ z%{UY#{I{6c6tlX38JOwA-bzT(;+#9rbfTE*g7wIn;q6M+noDf2ssiOM-C5pvOopVO z_nlu{vsyyfS^Lqmlyp;{;wFN^c%(xGCh~gcNGl!n7`yh`lXenq$y2O*^&eN>H%!BE(z|opeRJz!B*u?K)&LSc_dOxIwpyC~o*Xt%L~~i-#tY1h z2It?0&7d?^-_iM>9Tb?KZmIQ<#KWQvvFq}$Hp*^fH9NpsS1zPJgQ9Ra6!DCvx#TF3 zKh53LD0gQx49t2Y-CK*zj7yVIc8zzPI6PN=P@->JSIRe8rQp#uVKh&@jG%Lym~fwDdr*;cU^2J2m;AHS>#)ppe0wOVu4k}x z-o?QAbiUiJ-^ki3JuwhwxqH4EtPWgUyzNFzZlP^Qy_*v1ich`WktT^w%jnMKM$JAz zXIW=xk?cGriN2*0fhLN!-lSj-=Oe7z)fR2)^!prx%fmE+pbk{l$VdGAJ>Ba~-11=r zk;SD_k;3yiaVn!EJY^G&n~f~ss$}>0yL-VhqZ+W z-q_fP_s8$KY2JeRZSR8byJul0X^^j4TI!#{Adk|AJYp%Q#vNXkwmPSscp}J zW2}z)Fl1%0E4`dAxqdk)Cz*7Zc{S%aRdM8hAd-rz=hPw}cAIgVBtybrS!tx1=|*z} zIs`UHJgqS5PMxjz9~Ds6m}k^CD}+3|Fh;qqBiE#>h!@SLC~c!ljhtI(S8+R&)|xD2 z34O(6YrayUsi4mHsD4D={gP80cEvL>v%}5GUn1`hg(t7rF7YC9y_bxZ%>OQy_~E{H zXo{ctfqXMpzjFP3wN*?mqsSE9tS%^l8v#wWA?SSZNKSR18}AspD4ukgWmr$zAR6)X z`{q{5>2m}L$~XcF{U}%Wv4)fGbeJKgCj1~reo+m@^fW8A>L=@tqBkXXmo?bXZX{jF zs#nw6V^Kll!c@v@U>7Ls-&&`KXND{%!eJ;KjgVlQvsGJDbpBE7el$~{NX*c9Q2oG^ zo@yxf8I1*6|MAB0k{q9m#x!5e*?P0pP&Uu4sZnsqnY~#~m56MXWuWtg<+cs3PFrfe zY!EJk7FV>!!);uZjpDXfN}xlqBszUE@2>;rUP&1VhQ41EVZ@e&4o>4nlX5t4w# zX%Pv9c)qgv0F#MXRXp)(dpZVMSrwW?t%giY(0>zynjT-M-9<&S`#C@DW5uXVQN469 z+-9zzL{`Q9?T&pHN9#OZ#8fpoF)GXCZt1a)vN82Rxf_32+r236=^&w8xz>&KXQ11` zOrB-l^?d(S=F6Z!SQx{;UB_&?vVs@mfNYL!tzxw_`%7|A5V8dODJ!yFn}qFdHbEkH 
znjnK(n;}>oMqn(8+(J;iG>X)wHZ)^iiv-fXdY5TbRY$@@Xfs8g^caSBSfPVhrW!h` zc(G^Jum%qEe5YrOaC_DEuknelH(QZQMY2SJb8c=~iuimt%AB1dVNu2K>$k+O=l%5S zP2(ZitnvdG^6g4cSX_>|Mf~aw2oN7zr2iBoV)(aJJI`N)0C;WGCALL!Alk@4T+D8k z6Ajv$%AQELh`0t62EW?3^HrYDxWCv<(#5n8`g#XLYw{G4(4^KbVroZtjJ3gHyvO`& zl>+HY)-AdV1`{c)CQeK7+C(zA(X+H;`c1N#v@EfNyj0ZUz@;*2u6WoDmz3w| z>_ycGtuX>w<3Vidv~rWu;{l~=6s;$6Z+jM1B$(=-3zSKm>29*=SZRhjyyrK^UgM!& zjCBQPdAvu`ZKfq!tzHa^(!(5(%xK>pITTAIjFebHe}M%0KChhf-Uw4l$3OStdDTgs zT}sWNO{5te@O*VxhU1?hk!K0-0?ceU+g>z_i$4tisZCLl=#qLbn?4cfTxsNvGJrH? z1wLyf%<(x_Ky_|6COO})gw^4fQc}{#MQUqnA#_D%o8T&aw>bGyU%8k`MMi!Q%)@np ztYTn;(Oa>)T5H=J)#^*VybHT#WBHlG1M_9W`daa8P=8Ju7+4~y=W!y9o~+?A12jI{ zRA|>9BBsi$GboG{>rhy2Id{`y!c5NgL>0UV^)y2AZ|Bb$XDIH6vNo5FX>KiSa%sZc zXR~5a;UEyws+l72nE7dggTGw|Aj{4r-d@Z!A_#|qhxL%!I~E@}YSZ^{|2W{e3v(ve zehgth95s$HAIG&`A#q*V$8v4Gr0z9wXynrVO+g@4fw>%9&W=63lawtx9G4sy_*NS; zIn~|yZNEsbUr(PV|MCv66@N5+T9~&St*=L!gs7IMJaQ}}cIBQb2eSOGE*^7z&oDAs z?DHRWGd`FQoqozF*DGbahUyWdxE^k?Ai008(Q~+C+A-h$6xS%PUYekj-Y(42* z#`as_5P!$e-Si=&QoZK5e9X%jEH>Gk&#SUIt&hX;U~Zr^48=Et%ckH!BF2{C7NR&oFmtRQ@kNU<#{K@&$EE4{UY9- zNJ|p%2Gc^5r1{KDHDqfdwvoG%h)uasvED4dp6eK?GxD07t7j~*OhJ*M5c!`c`wBHa z2;2`Iy}aiFeFMM%`7!bA?oBgtt)WT{Vqx6^ z7>i!u3M<#UZEN2nih@~pf;@-(KmgQvCMswYSo$9#{|LS8#DKb~QNvp`uo_Ms#ejtJ zN&$B!w2fyC-VRjTZeM*C35-nw3fYM-04l8Q^C{E#B;foD=`wSsw+63r4OeWy4r+xD z{aqEuQHEY#<@inh%>l}8rlJ6z3-}`r9cCa05Byg7U0&D`bJ3L#J;oYkoltFo`*#DG~=mw)P7Ct7|!j%_dGNSq1MOg>sgua|A)tUDA{-b3= z@CPb$!#4LrekbOG`T{>k00A86J-SDRT$=5Y_kk>(cPAhGBf8Uhr16wbt}{*4m9tUc zMGJUjnajKO&D2LhE1%e$i2`Z`zbiKpPY@r)Z-!3i{5GjVb#?3?Rf%g9;8_&1k*X8w zxq0k`{h8m)%iB||Hcne{Q))s*D+Vz_fgU9cdKcucVEF8?PtJs==iOL%8vBDf+E5Y=4lVv`m1A z*4J*StS2YGzfxni$x_*^M*{(Si}Is!bv1fla_nu+U?KO(vitodp#k!NyU{8sjo^Ax zDQktkV7UEJ(Eu`87sCG(%7MfA+e?+bd~OJBM!OE-w(=-*f%v~8UJC&8UA$~&7eq;8 zp9b&m9r4mF{7H6q$ydt~RJ&F@e4hP)5GP0_HO)5%+sZBAq%x4m()aLM$SvXNIqarX*O7 zClS|ZSHSOT9k2HOKe+!r*89GCgX*#6^Og^h(ZRzEKnKe6`2iq{Hu#mx_uc9t-NUc$ 
zyO#5a00kHsB_2ySCt!n;OhY~cs`p2&a%s7cf?b)hyu(cD769XY53b*^AA64pN`?jM*GDZjP)DzOZ0REMgTY(Y}MC8 zYvYgozV)3^b-UB0c2aYm7+?)B#{};1WIA3oyBO*xm+rxs`>zMF`T1nuB4GCY4b6sGrTbKE~eCgFc19V0c^0bBST)s>EJu(20t0!u7m~VP4KZ9A8$#UG=l2)Oa z=^jL>4Cp`iwbFMQ`+VgCj(ub4bjm4hGMd@U;r;?7_MaazX#^ zkjxl<^+i%;*PT!25+Vm87?|oYTiW-t|L7tSwWx={1VFmCnA{P2BmvG zzj}0-6cgP5+ZLD%z^%dw27shOz6BfvqwH)BaaRSdq}j}NxHl#I4qgt-1e6uTqBkXbK|X`}L8NdC zWjHpEzi0q<;ZCBD8wioSv@g9JGdS-l6XEBjfH_htIno;*FZ^j+qQD7=P;pw zf*v!JUu63w(cO*!N7>KcwlWe-raM<239+I@e}StzU!Y{p*cDFEG6@JstVh-=_}Yqx z9#daM0iXms8K8LTx6G5P?~R+S2-DuH(SKE~M1c6dYleIJHdYiDE56$_ugaw+&MDHK zMRTDN6BGB41hEtB7=gkD%-OYtWsWDW;zo3TxCli=G^9kYVj%uU(d-+WpWL)pf0J}7 zfLGl1wOmr`37Z3tXifxt?8%vRWcxW(?X{?fv0@KU?;`!nI*7jqsk~TmoKh$#k!eU0 zMB{Rq$vdkqsGrQZFroQI#yIfJ51mxMuw=fsqK0+c`*ZbDY>u{Be!-$-*RK7q0sV!T z`t>8J+YfTT&6Q|%6@}sJl^-Xe_4KV5`$IzFVEZ{^X58;@<4AGcKCYk%U>X^tf6i8jc}nZ)q&ufdn2F%{>Ugr+AtbD`VtZ zuco*>tq4o8Bl@?V>dP+#m8hIJM|qW{L;L2{ZFgMf+QdE?qx z+Oa~n$k*C@Go>-To0kP>v>pLK+m1d`5a^NcEsQ)eAAz^@5rW=wjzkg_T`2t84tP)= z2%b~e5*-*V1RhhQrn|cS&Se_W_2U7R&#TQ0dGGHG170)wz&;tS5CG^+zUHS-_AjQa zMH3o}^x#%47Do1Oer-@RD~+kPTF?Ad5z{g5^7vZ@p`FqVs^BEP{4jsDUW0ldgc?M0 zyd^nectlOz-C697ie;FLrup<{=jML2mWqOt6Z~`oMoUW*?LA*Lz45rRFf}R4QizBy z20#HRE&D*Z;QpO~Z;0$zz;`aR`CRC{$`yZh7EmH1Vlwix6MuDJ?m`~FV7~)0xWGVt zlAVRD`^&c0`@V3tGgKeT7>S_?CEJ4<*e)h#WI;ozzgmt(Vgx`JGtb6@6 zv2JFRp3N3W^Zm_XePw=xATZ6Gf`$Q|f|)vziKx4SSo}0x_k&Nw$Qw zri)pXXvO~wlzae?M}nKx=J5Ub?Xs128K(_KQhCfdyYks~Ih%8+ty$p!NkyC`yGWpD z>_dfGosr5RfZ?D=v~!!GnDUfZE@vlULWxT?GV_FnS9tN#o+x8D{>%qoou4_^@k^xD zU`KC~`b`h?(4sT(=PbrwDg{gdBBx*bhul}z;J;hE>&QN=>96hV>0j$hLHXC73b^w z&zu{AqMz$Z(|^981AwjThF;gl!73tXmJC76ix1MnoX5mx3`#97#z&OL0~|r2BbO80@p=Vbfw_ zs@d_8EK%5-kA>42{!oR(_Xb?9ImPZ(004lui*Zmh-boiLd0at_@&M4CLeUCg8DZ~i zRR-o(V}+oEL~wF!ZuxGXa+;ojL7`|Uxxyv;O>1by-pDe0mZ0E)ls$W{z9ppfK^YN2 zlOx+;#BCuIxFm`Ldm*Bb5^q%+xLy`2fPZPD58Y z-a>c~ELNeIT$-RDQeoeH3pyUjzb?s4=m)3)rBlx>QyH=`I{M5KmPMQQ9mzYxAW=+w zFHA9sT9Ve8DA*KG7#~Gy-Gq!<6-2sOS6);&q*xt8u^yhPvv^&RCAM&Qx>;Z?W;rtH zef$PV;0_=hjtbv`@9dcMXYE=e!wZ 
zof*m!&GX#0RJ$J-oMC8W-?dd5v_uk)-?oN2X5FTnaO{sA*Jsx4?WI2-*I;vCoVNy! z-L^BI8x<(aEAL@35qVc3p`;Ym9intIKgI3CtE>o6rb`=1QGOoJ;M|TJr?(u*<1*%u z-PSLn;&V(oNA15u+vDq;5az>&w9_B(^>gq(AtHKt!znWg_A*R0bhe9^~EARLBg-nciNXHI^JOcr=yiVB@N4k=6NiR@-#HlSoHf?YWYolaJv2XIit1@9f+O zH%u+cT+UVQTs#jxCM3c6KnNK??6N~dB!h$`*ue%}AtLKAlZy&O$O}ax-Y})zI>DyS zkS>{j*fZh6IWBM;J>*&9puey$8870&X-AQF($aiJ9R6(s{xoC}$~Idr+HTZ{C)|HZqAS_hNOG zsctWLTC_KD^DCzk9g;%C7EZ%wWr~Lbr+HQ;(X+vl6n>ee<*L=cRmLZxqZv=}G)3(V z%7=1rRXW_la`(JPU)@Iu^!AO+OTA>8sHdv9mz>jn5f9{ zsd2%|%?CU5IO^lM0WHd)!ku3eh zS3#rrf2f#ZB0Gyg9`|qE8=Dkh@{_@jd>j+xsDc~x20Ox8c5khw7}%bdQuWL&^8o#9 z0m;9-P0nk?bw0@2(VHrKyP|0|)1~vD0Xc1vr!+RjQac9ot z!C6|k$Xz1mYQ8UR*`boiWo~!&)-NX6A7%@#KrDiRTl{C`0|J&2)z>d!r)3qSk#h|h zq6lai=|6UA3V)rj-Q zj)wcFDtucmz*@RQWK-=;4Mo5Nu4xPIB} z37+Ip>YTbpI(;vbJ;8EJr568JE$yE?ub4RYJDT7zf~3XFy9k@MQo91r-aRFVQhK%0 zu=-t;(HdI|ID=VHcA3Jn&8og3U?RH&fb=xCze51D&EchXMqxadUlqnnvu}9-m5}ODZ&Xv zP6!CZuMj!EA;-rG{%SwCba~2HZQop%xr}coN!cK^+o@OD>Ttiypk=b#n&5V`A;CT$ zRw;)pDA6Ce%t`AU*|lt{#fec9214Qo2on5ru>|pxyFC3fw{kkEweFa`<;w9g1yqu# z?LJ5rn%Ro7gfkARQ28514SUkhgaA0y7X`BAe$K%kKmP9rY{XCxk6bBjy&S~-h2eG5 z;zsHBr+FUg#mf<~{5j{hEMj2lqMAc0efv-V0T7_u3AoJoB7SGO3Y~BC`f7Td$qVY9 zzjqc{VV9rY;$yDw+k^NGr7(y8^1^)Y=E#Sdl&(NsnCp)x+~bJE9~67s1Sc5`DGq6j zmcG;Pj4}ExcdC^mVO^jkRKKPXKHh4Tfg4B{3&WkLi?f6TOCfgrvmBO^vvWlizQ5! z+I21XC4x)$RH!oMET#_jLcdJM2}SSwJT2^zot&x|_2;ZkdT7JYAY+!+tH(K6RCw(l zf&Wek;L>bX-0~AUo||zCgTpxKsM+2;w$}i*ORKUn8gN9vUjPoMK~;(K zfF&WYZb%PbNxfe>Zi)&xkBiPAS-TYem2OR#sNdP{<6QuB?NMUu-`wc4nouSM? 
z`uY38BVGRhoe>=A#{wO+Sb-!l#u|`gC1n~%D7NrTohiVaz{4jNWqUAs?e>6Fbbw8F z!bv}N@RDI zA9rx)X=$Ut`mcrhLb_1^S45lk?MKL36?!}rn;Q&;QW(M-pV=OuVg}LGT+B;%O?TH2 z`zTEx$!d99LEpsooF8KB9_vtEPkLn;7|N{&X7b!@siV<*b`z@v^mfxfK7@BBoG(J` zBl^9D0#OB?8dVr(bs+oEH<2WC`qRZFU|R2qvD^f#vJLeN=uVsWrPZl16#w?q zK<4Z0(vNm`e|?>EU@{_?UjgY`jTjJ`pzd~{`ZqfgJCjQ+Qc?hB=X z6Si+5O~Ca|V;q)LS}^$8vKBqeRkg%|eyyjdnzy`1W~G(+6Y$b<10o<|m^DI3&if*Y zPcihp4G@_h=E2HTy$uESMC@zA1gx{nrQ%tIit}d|VJ$a?UH1#=FypV(|9&8qmaXvh z4A6M)o}HClbln;PHMkrifwM^<;Nf-^qTbB|-(skse*#?s`EMZoi2+;zUvd}o^-Gp1 z;(jCbdp%Aknr5@|xBCe}CRnd4po5guq>TXwXm5|-)OGB0f7TP7?*E2RkH2xv<3{8% z)K>?zYbA5Ur-LDEzihHdz{dd8 z@*XV`Yxc-O;bR08x-Jd|T);Zp5=A7l@b~}4cSJw%5Z^Ei!q=dz^j`@|vV)s7_m0I8 z#iJp9w~+5%_f*_Xez3OS<}($ozj+XfNR?1M4Vf{6loSb>@) z2>8F4yGy8V=f0xqkZ)0P?HddcjBTZ@Y>967xyQcBm?usdM6nqN$Mm){=M9Dg7@5nz zCU-gfU5X#nSMyHZWPrZC#(XAMEWS%n79wMT3sxqEWu%2XR?QmusInEn`Ct<&h_%Am zA@2IYD(R+gPA${nQOIBYf3~z^0r~EHKsE6!V%2ziFd}a7_`Cw0a_ynX(;<^BLtcH0 zK*L>d;^W0xgt9Ta?2;Saya;tD0@?xfO&QW%$ttf>H>`)gU1{(M@EDdF;TiQ@Y3wCv zU94p+U@w+!{+h~yW~k0HLEed%HzFJvs+myUQkfrLYXgtkF{v|;h;ad2&s z5O_ENTG$Y6z?v(eDlXql`Nt|VXE^>+9n|rFsz7?j_fieeu8nxmt120aVW-z04t>}h zQPNihv%5(J{gD5+@L%hKUVc8Ze_ge5Pfl!f3q#H3i5!-MxZ73nsNM41aJ(PX$eFM0 zk18nqcpEZTo^rQs$TQZB21Kroj&BN{)55-))?f#L6%KuqL>`OWjzU&#goKx9%J5lj zVJ)!=G>S#?tNX?U>Ce5^^_g~tF~uuTWq>-H;{Vp*W`bH{Jx=bYVGg(}Gf|P-2HVKN zQBF82XB0OZlaA|tpR#m~*6!@nZy$z2r=kQjXh@~Qkhy}Tcx-g4RgXx|$I4Ak`HLsi zLFs*Bt`ihB zjSg791#aSdcdH!H(*z={Ly#wP2lU?LagBRBUroI?K2C4$ z#1Dht0Y2np<1fjLV9SO&>G3zJJ^e;H|^OK8W~$ zh5Mn4xZ=f|@ntTA-2A23CAyYfCA33YAN{fRnD_s8%N{1Azp0R?*P=+2pr5+Uot(~g zdNy8h*wX}7uJVa{(H^bdAVXLhI2df(>iz9-JCyM)8rK>$6=2!d{_CUFM!_72-=O?p zFu~o-9z^WtN3CVd^0g`mPAJ@!vE06*Za*GTC05le(V3G3Ome{CVgE@3|3-Mcke;*! 
zL4$)~G3GJ3vVHri!%Ti3e2HWHx5Lb#v9td5wXi36+p?1gu3@+`K>FrO z1c*)@2#9`E1lNarVefBe^p5$Zlke}T6uTPrm5}6_zgyfKDLC2#tvS6Uw4F(m=F6<{ z^2tkd2YPTn(v61aGO)j!i2XYarD~AZ{*Xp|95k8Iv`e1NPhD&Hnrt^!YR)#HNel^( zS&#b`E1rx}N*`4Xulyy~Kr%Rh04bd704h(Y%pof72T1{W-W}UNt&uT;BTg~n#Ay3x zPPrbs&qxxj^fb$Cau)XUqcLSiFgfCFpr80psh|kSq$H4V2j=Yl?KJH4i4i*M*^3zj+H$yZ_gugKXZ%I8q zh4^|aJ)$o~ONkjj+qCyrK5QuRCEf}$7nbf;*Hrk|P4M{jN%4Tg0eu4TY0h_~u&h{z z5q;4AuCY71pwXlly)Et`EJ3g~_*9=a>TIt%Lx;jx#;0n&m-{b=$s{sG=yKh`__M8L zroeJSzJFu7{9ii+2K$Bh?F|wTi$JcbyFg2nBA7v+;G=yph8xi{Ku>E6@&t-MA)?9A zLLEc>ueTGj-LE z(0|s6M*kizADx}H@3LbHk}z(%gg&teJ~=uLvcxOLN}Qq1PgZ_B zjBr$daAFOJ3M0W_uZZ8k{8U2b-7V3aNT&-=%(*=Pxxz~w!@}!#J{o9Exb*}b`7xhb z-nbWMyEapocx!FSE^c1UmR2>GNuGt;*}k>^B|nQo`opUK&k^~25}22)EC+>@?CNF zqMJUP8s5y36KmPonxu=h4tJhoLr)}1SGXs*r~28`f>_EKBCuHY5{0Zw@)5%l=` z5nKC@(0`+pwjcp*jWW*L2}AJxBJ+z&_l8=e#Zk<*I-u{lTD;O(K7t?vk-JFmRw(P1 zug_S(00)Od1Xw3akLWPfx|nZznxcukY@No5GBZ?uhG;YT6((x5sKBE^{|Y0d=gYK- zpE_*afwJ--*qeR4;-9Tv+9VGt31Ni)TBt9ia%j&uHC>M!SB<&3w_9$)k0vdq-)l>` z{(kk46PmwdDXa!Ntwq%g#NURn#xLt%endhtqXPVML0%K<>;%y-QJc7rBTitUkjD0MO>wT~3hJRD=6Wd1%9PN@&(kkl zCS@27kc*01Tyi;c{%_Oj7aR$cRoU)+X+Qvw=>0L{rSHDl>Ee|?!Q zJh|hk9wIuYaLc0!QLqTUMP@6oBW{TI)!PR8eC63C3iWdTps**!Hnih)jJ^;@`L7N5 z0quX~L4Yvdwz~+Ms4zi;`A|#gji` zpr^#Zx_O9b)odc$mobbzW78C^nsT3XJgwsQXM7Q}u3n{mtc*D?xUeCgkC>^szt14amvs&}3boSN|Hj85#+6p%XS)bkH!Wa1SqVgiFyizODF? 
zLwF>#K*|We+slx-xA1W>mPwqTFW!Gs$J}h9pB(L|)o<4XiK*uJ<9mPd@UTB?LRQMd z^ai~|I6!na1AIs3rx@*TB`}vN%scBj=V+@t!h})khT`rA5o}G%N@P$~OURx$DE;Uwv1plNKrK zIo1IlYK{ZLN0TMOH9*z*MpjAw;~QB+$L#|6jzU46kvBdtLrUHLU6blCS>=-_ zJSLHPq~Q1Iy|s009Em|>*JeH;`NVhb-SEwtZ)?9<3$sGa?=8try1a5Aa^PK+M@pD4 z$@7f2ajwrEb0zUK`J`}V$bEpla*p|bphXlLZ8J(A{LDs^WY2vfFCv_d?0LTRoMRno zUfz$~_FYZl<42QJ{utm?f(#ox(eH4GHM^5_EJ{0e|MH%$(@uy!1lp}R_wU;d;Z&K= zrEK?&_->ujWXStlU5a>a;+tz6_GYh)tt=M_I_;&FtgJ^6_=cnKOyeJmCo&8sc!5uP zG`?8I>79_N_!NE!j?z|Wj^kW?12LJ*0{$7VVPB<0QB5jdCU|I{WHiY&W|`1Mp+EO- zzPS`KzmX}V{WGuD6R^8~@A=Ul1N3=v5IwQhm;2`5h+PdZvS}yFV-b$GLH#eePSvyL zXhz=ZN`OR0UDgHSxJnxlJ*T~c;j9h1ZQ#-lcyT~I;mg{2#FESoghcd~1e6|u_O}=C z(A+UlA&Y!;htucaQAE%`5aT>0!xn@R-3|&)gw{sCqA&7^i_%tO`>|8>rqkx2@QY}y z&`7;$Ud)RFsu$(2JDov(M)O7spCor1gVGmY?ycyz%0PjHh1^2~z6)1J@zUt!})rb-p>;EUM)TJ{ESX=0~;T{6YS%Z^{CM_&Lh23i}{A_ zNQ}?zZWHt*x?8RV`q`&PgCU#4go&Mo0CFEN+}N!H+2&rxGV9$bxwH*ZRDvELcYuh7 zFKM+ag4V7*-2RfAfv!d&H~R_!c2}08PG?DSecmD_dF0us&V*QdeHhidW0!y!H z_WK(z%i$tf+hcEuxikI4Q=v?}R(< z9ZE?*_l-m9=gEgp{#U4+&4dc+*w$PYU`x0Sk$dJu9;VpXzTl_d<8;J!ee#)#=+@7M z*5on!h~a+M8^gY)q3}E-SXq=IemqdJ+)$`{M^&kI^N0a9-y5U;!SXb_!AjnUdoq%# z4cY~w3*MdQXl>!*S=7Gm8_#1G&@~R?qUd-10_Ego2K}x7SXI4d?_wIoo!8(&hY}xC z^7W(PIf~pHROc%t$k*yvxz7xJ_pUA|CNb+kjne|%{T@UYnfj z6=RfFEY=@ISkg`HWkd?eQE=E?gkyq$Z{SX+CW z7tq+0@VujythS;X&RrF(b(n_%gK(PA#dSfRaJ8IsDmhOQMXk*wvNKI*K<*kw- zvEA#E5SSY_PJMK@Z0r}K*Xuz0s{Vqa8_rI2jVZ>ze5neV+9Igry~=+xmeJ#6Py2JS zjY{Hx!rde%*QE_~MRg$tq16k7Enms#Ex>+mdF;ywmA?3Wobm&%NESU0l+pJ{GX^=p zc3s$%30FA^b_zNr8hP~Dx9-8m+)M9U>vdzmA`o275Gh9zNdB z$I+_O*pucqv=Cub1lGT2y*aKcr$*5=YB&`FECM1-@X;c87Q!mt*uC(R@MK>6LrBMW z(=6cec6xsX+cjXd^X3776S7Ru03l_QF8}7|pas(T97!=cwL-)am-#?HDf%0%airk`O+3 zFFRIAs^ebJp2(*9tTvHZlLrewP)E;uug}{6JKf9_AkAncLj9WS@0~0%9DNBh-WcH5 zFQl3~a^{X!S{#+cd+(7*svjN-cm-vA(W}j^Vx1G9FhBL)>3dt1Ih;oj*sYn0k^{Nq z89GaMZGNL;(p{a#_Quy|tFrDTe8NwHg&!N5uVpYnuv2GTfzP>pewJGuJ|liz&m)a- zI;9)=n&j{j47zQRr}ucM!#MVNbJ5-Ym8#QvG22c&HH<5Laf2FO6K*)i7a6Bs|DE0r 
zgMdDpSJ>f@BcL@D0n5$|XudQ6DDNUPDMHvTrV;gcz6Gu}YK>Fc%)h`7#v0xeX_h9% zaS4Mo>=-!RpN@HK7liJW6$z9MQN2q%xf6#Ci?Ftcu@6Vx< zPMAR%^gxeRX*GBd40r4xRl!MSf=1PNaft#8)L8X@2CJD!uR!?>Ex_ZJ*JRlA zR)zH9=Q?RnNmh&U;(qUw7G8X7D*E^}y9%5S8=s901p<*78K%{He9NkU!8Jse%&kx@ zZ-O{Eg#wGiA!#JJL3NrhwsvUL)qz&ZD847eX~8Nsdg7=*I+UX~G=~W*JAcxokde?N zF-Cx8B)e)NMX|p>{W)~QgKs|)(p`^T${1?89|lI~Tqhj5!uZiQ^+K(Gn(-B2;kpT9 z)W%A=jLAy=MUHF|OVYt%5HK*jh5vbODEr47zqU$jT`Jks1n>i7ii0u4X6(z6Iuf!JF(>XBx;Grhg!dUa3wV}r!DIdtp#r$Z0mofP zc+|#aGR57EorY1yjepmL402Vt{H^4LNTL8UKg32f4(@mC^+*()N!wYE2&w>c@BK27 z=oRsBkXvaW42v)Kl+(uHeD%C=e6eV%Uw}vD;*1HI?Nh)_%AwIlXIgxDR8qQBmRdPi z9STqinM<$G0A|=p#53}3%fM@0hJY#Kkt-0vLyqMJP^)CMPb3ZxRczxYs0Vny)s*_{qb_u{Es za5ty#Kl8>2(zl&{)#0)xALTK7zCppi$a{t|iW9LSNm+==w)q&^xvVf0JrQ;uTX>h} zIv!lg!g4_*TXP@Dc14q#Ye(?KloFEhtk-KOFD$J7P| z;zOm3?!T+z?|MSkHb;^CvRIiXNmX%@Q8$g^sJc~ zVNP{G&yZ{KugdOuNdN}!Hir?f^Zh2UtPOCgjPO&S*Z$y*O`B|1o09CeoFFmvsoix- z^N;tWEco>-@A<#hpd|1Wl02-pI_?M$ZY{X>O#)N+MSO{R!D#rVuv;Wi)W3QW5o)}i z#JDd_Ix{Qz04WO0Bq8aC3il1>urRAb?50aeI5glU$5;tIvFJypNlbE)oNHc}mme<} z)GFW#TZ(+#ZyiQK^18t6s^F`I1)dYFP&@m%eDugx*yZQt3z&D9O1kyAF!g-NC=oTP zD__YkZ$b+7l-oYGqNZ%dfXz~ql9}w!Ht^xW5;T# zf=30%nvF1v-%w3iIN9I{19|_qV{96p5KZu8#A6Ie)qTV=RFf(mDr7NKQ-3)~708t; ze_j%QY9gc4v7zKjAEucQQO-m$$WX-5XPr{(_o-mvfbT2ebhUNVG1V3j)$HuaX3%sT zG!aXQl}+E`UY`aQL7CFko#FLU|+BC&~EeJH!e182}$5k z5xfSHhf;8z(Z4qs@#EB?ZbrX=nJ>);gNj$4(-@8G`Y=aR8?q+4w z6LSV?9_9~9GR@?7UD3=ouY;g<5K2I#(&*5vN|}1L^P0ykQ$ZXGbO!h9-S`P@AKmld zHy3g}Tk$@!pxR_mzCL=d{ql+3P~0>JCz{@!7T`0E{tY|#4TiGK?HIem0-yE@3MTDd zTtZO7V1KqofbCj_SAPT>t;<~6W7~gE$IicNojD2z8HFgt*nMXJw$pyOLVtxTWP)EQ z0{YZggGOyWjWqY_0rWZMeJ=idr-m_M^HUWjC~ue!L8mdy_nZ~% zD$-TA8A#6kinGVs?>}BWthm%i$FvjE)|LvCDOq)o#@BmN)U}?YUKEmax3ubS1Sm)Y ze&R_JGRVJ@p3f?d~;jT#4UqFtG#c<{-idbgiu`HmUDv!q=kZZ}1g5CX1SS z8`_};kX2RH>XQ1%fWGP;Y2G(o$5oItVrSj(NM=7tnnp{sick;vbNEeUv!0nIh#eLz zg|xXe$ENKkm9O0Dir@_{q-{k56aH2Xa7y&E@B&HW*iyD{ulY`Yo3ex?)d;^NXg)}R zAO&^;LxG+L?aZ|sNRwk|p~3(H(Y_v*+zBS40YEEI5v!@^S@azOlkwA|7Uda>F;OZ5+q4RD5 
zmP$ntd4M2@yKI?YwvFH`lnYMdj?+XfiZeb_XhK>mh|0p z=Vo!@ZC=&YKY_k-8Bm5hRX!;av(Poy7Pek7v&NXjdqvQ&ULor}K7EWfLyUw$2pYO} z>*@St_3DYQx=d*ALmaBt3a5Q6O75HjJ188ffn(cvM~PX+)V>*mJzAgTwYp=jVx14Z z<+{p#{VpvL2-(*jgI7Woq@{8b=0q`V;t@HrG(VTQMn8eWdjkFPYoJaXcG>~?aqbzVe@$W_C$3UIa~qB@u^%9YB(VWC!m15FPu8RJb14(pLa=#}f>zKBFR?1Ly7W>T&f6B71Ab(Yix zJ-f)5BUUfw#JcE1w;pv^+*onQUD4(261T7?zG2*b`*I$0yZT8jH0EX_$M7VMesl@X zRrAjQr$!BMW9;RvuW?mzwU|bCJ!siQ6wRitkb`PkX*Y#84%jYnN<42RX3mn z6mktj8ZEaW{05aTH=y4a%vgIHPcec{M9C$R^Vl3WiuNYSpB6`Hf&dwIpg}U;XKiD9 zC`Na5PfX+(5j{I9wAe`HYUnyA90~h(Z0zK zi~#Xh)O>)*(Kb+SN`Q3Dnkzs|#L_rLSxz!)r6#pMj z=NMR5u)Y1Tv27pi{q{@>5%e3;pLX7-xr`K`5> z@35D27MhPI{fV^n_u!vt`evh`Ifeh@SJWF!JGYCqq$hkb3B8|jw3>N|LCr&LL{M#@ zxJ64q{0$P^T_p9rJf!`58)TIMcFn1Zfd7Af?6opPj<&bBuJ3V02~x{*LXCsoi1-vj ze`nJspv8@}?#Y-?_Vu<+{F&#K9ZTT%?{`cpTvDI_%@!;;dx!(p?3A+lg5XJTAGBF@ z^HjW!A*|}9E&Rwoqa>4d>=;P@>d8&G>A|k5x*@h~WtQqZ-cs3jn`|Nu zv9EX#=&`S5A(rqbw~4e|)LC?7xF?Fh=hCeR&-c=W3swIQCh!M`IL{_z_i-oh^l-L~ zL9)dU_n!?HOhIwCpA_4fsHPsz%VK_Y^~7>$n^sGa>5mTg#{}&r76W8$NjqQ7ei*ru zbvw2BC8Y*AleE$XXQ6?YBL|$4)LqCT%4XZ^B+0kSUCx8)cT<1U4xMAmJVpk+65cL+ zfZ4UkOky3si?g? 
zm_17dSq%z_kod}Qk}e4nLsq!!)5yE`S}zt>OpLp$EBL+N97E|;an^L`!)g$_iMKCQ zn7L-VEYiuV0pa4^*GFg@BqilRKE}Fq5GVg<`Ul{OZ@(1ZT(57cB53FmmIQ!<>xcXE z*|e_it@kF86{kS!Ym%X_W+KNw*`$~u;w#8^Ua+bms=sLSg&-S%neOof+`$=rNHCN& zApb%^DT4*W+ilp+CsMF#;AiF)cK;!amrAxim@fix3I`_hSPVP;k!Q(4g`&(IrOhZu`nsV-ui9Y48Fv2s~o0$qo#a&E=#vum#i99;W zfI;a;Frq)o2q?eke-F+j>K2bifLr)QNecml4o;cCh-!Pty*s$H3bD2CT->_dWC8q0 z4{aOu*8d$5EZF)qFfusIo%41sxkuVN+FV7UI^iu&P>j_U$}3y=zWF&Rb$EQ>E$8vu z{3&FG(eFQ&zJf@vSqL&v?}z9wf_4r(X6AjIR~(RES}fBs3(#%3#|H+wN(tQPL}$2OhXPc0JF=QqWK zo(7pYkeB~zHa2#i_T`-8Coj?exAeB%aL+Ryq*1dTAdp0E^M36{lW>B8r zj@8_46Mj-36j&{XFS}VquDyo(IvalcXET-wL2T+|f*9CI%{jcE;R0_w=r!#+6_LKZHi2I=t!x|} zVM{1Ow;&M5p>1DTkl^9|fxR)g1mMl> zPVQwy_?}%1Kl#yvfv)v`pb$k22t=xV7!Dst-TABBza048zvd8onAAYg?-slu=I*3} z@eT=0Sm10^U78FYr|+0Qy@mk{%1aAQ~kVuZQdBGd2vvdhy zRZWI`^Vd&0UE@%%@_E8@N|$E$o|Q?iyHg#MAl z|A7x^B(}?kjtpP&T@Z}=!-9lr8*7ib9H@y(ys@B$#6i7B=RGbzNVZF9pMF_C|7TD@ zdZpJ38CevC(%V;+oN9nYC+AA7eB0oM9O_Y()XaUK1Ja1JkqDS*FFlyTyOXx22N0mC zCk+dL-e*LX%i8#Z9RbZ5@b550zC-JCyVj9cT$j{UhK?w1H|%IIm>G#3##;_0d8HBD z)^tLN&U9ub1VeEk49Fs7aq;*O zM>+71A25XSt3hqm5G|q?}FTNh; z0ABl%x}yNh+K=sc${(Vo;=8r;zYFSH;8FEafT&P>yc$^05&q2nIVKKql~)VLM16*Y zyod-mb-Vbln+cq@ZWX}SJ;Lmx#nbpfe|Tu1zvXD)du^uFqS=4Upf%u?b2m}AEFUED zp^6J)L1t*&saR%Y*v}U}mV7PNvdG5f%d4CN;+5M6Da|a(2$lD+R#9;Dv87(ID%?-Jo^8Y`DJ$Y2CSC$Mc`7Bi4^fuGSV5!cO+N z{Miss+2-4bns@(}7`Ewh4X^;=Wo$6gdXULni*}#3a3N&(W%^PoS0xfvlS%*RaZ-ni zH>e>v4TOv|JuO^U_t^nGL)D{U`K`{=PN0>J959X&p4|*5J(;>{B<%wX%y#YLewtvO zOQIMUxGH(5mrJN4%&IVjx>~AoIT!u!+jT7o5le~D7n7UXx>VAyETed;jz1M#ifN-z z>79nC7$&}LM2IT<424qW0b%#{%6T3;J(!B z;-K4L0uI8MB}>1mLcPLsk@l11DDc~h6lx&lm9V%9`rZHMZ7YawA2P)D^+9B^wP+7Z zgi7?kUr?U{jxlinw(d1pdW50&z6i{~aMhYHpwViGako$eci0;4hnEZ|^-UDVa6ij( z>)if;;DES4P@;wPBO(nzW;}$NarGIc7E~nJN0*h{hXvD7uo&Rm%5~v}vu?`{g@r)@ zLi2_zZ&1O{l@#*SKO|hN!FuF-zEwY2!~2*9|2ty@FNhiKO#&I+OcGVDJZTcT%uJif zs8a)>S#5#X*g?bd=bC5?>f=9z7swySs_D2@Ds^1<5nnIAl@-ox^`ZdYTOhB+aC-lH zLHs>j@dt%f-j5icsn=apFh)>3V3q=?P5*wi{8oDy;0t^j4gz32=1(SHo)uv#kUw~m 
z5O}5!ww+JWwI%;EAb$&DkHMJWp+Q0%n)LnUGR1_EfGnU6;?)J9DB>4RKzz68Lc?T5e+#>fw{q9?l^zjgOG zLn*!Ef1KcowF?gUhu&CY1)ym}X3<|yUCHk{pR#I8{s$Km+#tLD%#e{s!u)U@UvhM1 z;FR`ti^=Z8OcujiQAJI)U9*Y+fmA9apg!ts^d`+%N7D5lVGPY>BuEv4B{79PcZ z0>_T?JwO*Bw{C;3BKAtE;>$T`Vpon?csoN~6uJkZSpJzPqb%2g=xy{Vw~OixLIm9Z zTPbl8hU@A0ntr6~<-YvYf3nzMWa}R!J>zQeVZ)&poFP0%f*YncXU3_;rsWLqEgX%z za<-7^Ri~*sYnd?_A`Ffe_*75Agxfsf;WG)p4Wv@_WwotBI>tBUlU%Co&g>&=t?Sqy4Qnb+@?&fjpBjZT6QD{6Abwb4+hiPmbg2ea0o7#=4WS2QdCI zBnnXApwi%$PjlY9`+!Nyb>m{tKf?hdsUP>sn&!S(@KHL6F{d9_`~_CZ2UeTY7q(db z?O(Qo@X!2uTzrs3XX)$?t4cL!b8#R5hRyaQnWFUTzNfG)O)+J5Ao@QMRvS@(hQ=Tq z*O0kc>dGpr%AK6c3rzQ3HYEHBGAIYh%*6vAeN044_Ly0$-mt~8+~kg$t&gyY=0-1v zM@P9!@3uzvnrg?~k1-tQ`~p^EZ;>uoou{kk})ZKE%CC(w# z(9i?-hvup+{a?eY&av{)DSHRBlQz=|yUBzQG8y_M)Xrv>8Ry|&V+QgQBxyDIzE#Mx zoc=yPUC^7X&nwX{P|?W$%_R*vT*1xvDz70L$lE%F6oWy$RBVEpMF3&3{{|ma(1Uu^ zp&>jfmG6Q24FcIG7zCud33sk@-kJbX5Fj1{trcRPmb(KC4vwz(%{v1CV4xzK4$>&( zg=*!((0?eH=@_$ecWY_msOjVw&o^jsAJ0LZb@46aWt@8e{1B8Cog~u4V(ojM%3UQ8 zXlLNvBAZa)rOTuE#x#MKAyP99C&R&N#qw6)qs+7%DPQ(@5uy9_ibd zuhw1>P?!GgKU{LKKmdq>ADBYTy~T~ru9RkEDJ$;@rnXhN!5EpOZ#q)fxWKrKYRCzy z?(1}-UHiBZw+G;ZOd~OBzwz^XeUX6lm(G{eT~w;nu$BO8t@>{q*R++b=`7L@RvUpR z2NNlc$vH9;|D;#*B0S+wXYy1}OZ?(|$K*s4N!XK$89!zHjNEt5R5B}6i|c&f+d@|U zA|fdhu2`q0Ln>Y?-r~Cb#+i9ezS2r{0wxyH?J?k|ldgDt_rdcF1mCC8FXhf;I5Yx8 zgr6~$Aw1|cQ=Tqx_u)yVyl42g4WAdf-M|O3cqF#H6l7nU40``LwD-|` zBVXgtiX>Vy1@gL>tsg6WMWVOVOa|KL*$BGhMjV7sX)r%_lwa{OT8%oqc)9GICZ*PC z^|VFlh0T+%qD-}TOv~mhBUz7BfBd@IvlL6Hp>U|?C&x@Ap1A}^}(mR-$7c2{qmIkvDR0w_B z_Ys16BL`5UT57(Qc1j$-1^<+?s`8OoBwCOfM~Rx|Rwz}@P{cjysrsHDiFsN&j$Mf= z2CL&PWqUf7yZa|V@fUZ;?pj@O9$XvX`P(7Nq)ckr)3gZ$Wzh1=k#np=?JuJbxngM= zqA4ih`#xiLMQk(-d=iLPh{%|C;T3aGxnHTEM#8pxmbeO6_QAEEo3PE=jbjFc2T!`+ z9me`a=(e9P<%V9K+=jiB5nx|wMSICGgel&hMAjOoZ=?GZPX>A+prSf{Ui4Ct3h(1` znB#DBmSEy9cm};6t9?ot6)M5dvNH2^jh_tGUQ_e*Rw66Q+A2cNAXdlzQB|U4=57@G zxYt-xxRMbTu{y{VrYcRSbooN%W}=0Q#$ph*(#=wF__pa$I&*w8B1R7gmmN1M6$Q{^ zh7^$xI~*e?pZ|p!@_OP9DU?8-JuG}md7#m%RtiCJ-LDv&cX7&1DNMPmOp$#&1~7~# 
z%}q27PHS3OCF19jy>8LM0Js5dJKy`k`&z2a8t*nj@ZCBRf9ZzVvJ6lD&2mc1$&F2~Jo6X}aYe*b06`3yq6V>ky2NqH+)}P;G;z0U_ z@5Oz`t2-(eywLArkJ8gOmhOCFH>3R71_~ml zrwwFIYxKSO0K2m?>-15Rd1B}>Z;?i@t^YNJpZHA}4}T7q-wAe9Qqd{$Q3c~f6j200aj zYF1_Z2b~l)ScKv<56yXifL4OYJb7_+M~!vX>CE)eQPj}a2NmY~4b(6Bth(5YW-z5y zozHicXP09`EASsqT?i?ILog+6oa`1i#`>~y)v5`)_5r<-BtSjp`fMI5utaVT{2RPr zNIii!V+Ai5g)ns$f2-CzhZRR7?TesCpeM437kg7Jsn2EcPJ-FCXr(#c%B&C}LATbM z!b9F>jyhyHLAauepoJA0cW*LOK`JOsOd3?uoMLK`PSx~r+uGa{1)p)bpUi+$W(q;D z&92n%@t~hKu=)c!2>Vy3%n5ia4GKZ0QRVdok*P(UwBFwOU~b%Ryjf%6 zNAvI+?OAiWqYQM($@JEq_ZkSGDvofos`wMg6pYE3O$omox=)Pf5x1m7S}Pofbf-wS zWf1?uKEQ=8^`RJ-mRzLUP4Wn>i!;rT=HWn`bXo}4D2K$~yi3nsKaro5z;<5&>WuiZ zhDNQpJHCJaNRSwxg;L0Z7DD0+ek(~9`9_8e8VI8Fj!~@#mu}x_gcW;jHUY8K^5cRg z_T2osVRZLmeW;*XXJ~h^GP|PCzSXXGXb`&N+Myu?lG=?B1rg~-G54*MiB(Dp$ zSOnOLWK0-bLa;PvmFJ;S4My7OT8oqKon*@^4Ct>C_knhk+hS)6j0pcs zc~7NX^c&@;?mH21V$s5#C_pD;aoQ(xO=U!oVp_s1_9#ODm9vn_%Ay>SGMf0_6EZa( z(b+hPC$GFYqg#Ud zG2jIg_t24cLa;e$=ZWJ~i%3YU9?X;U7}b;Yo|F%(=JGE{7RqzGvwOc-mfiMbyAnnh zGioj&XD9rUr;%B%?u;$AU01C@-u*8+EBz8!at=XnEU87f2wVEsG0ZYHZn+bnQB8|uIl-2(JE9aF2;nj(3>l)$&LKavI*4hnI_>RV+! zNPisoqLWFRbgc#aNtBRRj5{hqbZ+dpN6*LQP$AZ&|L|n zlzSgX#>X&)yc2~Na0{QS3$@nsmUfdq=lJ8lp+W!!13FmW@i6Q(n^GUQ*hoRq<$ zWL2eI^~hl(^hr~WtLAjZQ%oDL^NY1Ub^2Mu@GT~*b@%b5gZELYWkAMm1sYi6>A#ZF zAZRAQiz$NE4gy=uLYz9S)AOxYN%T@JQ&Y*b=T7FtG&ZU)hHsO2Vl6s?UC9}>KwVO# zDDDrVZ1OD+?g(lgb&*&`T5 z*>{5)I29yj*3Bk|08`MqN#ox(+>AHfv{h!y9?k-O%=X(<+R_8I@BE$v=ro%~A43!u zDS-{FLbjC(S3+A<;-jN8-U9>tAFA-!{C_W<)ks8St2m$s z6Z+1x_zZf}tK5IP;^OfLXZ<}9FR_mzToGuDLKY_lV~5yvM%%7+#v9y{uQfh4RQ*{W zqdlOp+(RV}(RzLQ^rL=^_!GZX_W5#!;Q-yhN$(`FJ>p`7c|+U9IP-ULOwv z>_jX*k%B}mxu&}g=T`cbVCgN}sByc5eR=tr#kjjI6c9`)d9V~NJZgkckwA=&DLhwa z1Pj%p-<9_g2+4*Rstn=@T#}(6^#G}KR+wXE)YXu$XR`Ph^Mkx)U4*kGA@+JxMzk~H zW;k4yZ-I90koRSx>H~qkqItp)vRYd2i#RNrN#o4Re$Nh;pC_dlhXaGJ5xpHzIy(m! 
zSL@Y(39>1BljU2lnPLifm*?hc_JPXh0w0*Fv-%_JlPRv{px!f;3gA}}%Sb~4rvU{40?;h?4k9tO)PHhD)UkECPhr)WdBt9GCb3hNV+$6)6xh9=Gr z;%<1=9-5W^*Jh{)5&TS~FrJbiq9!_YC~$wEGfCH|2K$@EwRh3#F;JnF@g#EH=7{Wm z)m6|9?4#h)hT~^y;D^Gf-$q=r>HGWEZaW;(h}Kvtyap(4r+IePH@*uIl9`0|?{!^< zwdBuoDlI$MK~KoS*{HI?r!p?>+5zZB+}-1{Q`euxgEi-bo6!C`z2Jd3L++Q5YK)?yqjMq?$x#{`3u^ z#bA?+xADMr(zYlTf9Gwkx71KM%&`pmHdI^`rwcZq7V%>cSVRYt`?{CA!64RoBz)OS z45@Uauj1_>bq|f?#M7gQXB}OQfncKfl_9 z(0s)5Xj-8eQw%rq3Oh2H@;)Ss-*&ua(9r`G7GP8YdAk3x{-t?FY+Pm|~hU7Q7h7vS_)gqEF{ulLp zxfYN27_j(4KKTQ3fY)!?P{$wQk`@i~NtNCFuZ<&1%=}cKgsazqqy{4;Ka9qLYhNNR zhVw`zg&_ID1s_l>_p$oD9S3;+R0Svi{u~ngLJ}CFQ|qts6jfqtm5@KK{IeLT97_K+ z>U}3CmTGR`6T*N7i2$5ZbC6(Rxe8Z{nM^sE@k%!^kqk-ZbB%hVU3v6yHqi0O&P;Lc zzImD0{Z!T4fD?*HnFLHtocMKHh{FE${)AA#xf(i(Fbz3RW{92QQgO3h73coo!I@a_ zs1^{eYaBiZj8?W$!LE?}v~aw9{9KsOvEeW#K+>?=)1Onm_!Xs(XU}mzf6o#Lm6~6q zxP>K#|I5@WLxH%w^oHx?GA&_de>>lPx_)|*lU^wnR;9vE#OaZ4RF@ah#E$brDnWyz zH9HI^cIyK>Fk5~e!V(idda*GlScKd|0ksz=eh(u;zhNm+cq%J45%YSw-PiOu$X*CO zCqZ{PtB&jh+Ofb|5v0g3Z&%8o3z3XAOW&U`L;s#Z1OVemnyVFgx%IxV0eut)qH6&|CA}26 z!9wwR!%ZUu;Se~Z02a zQ||Mu<{c=dg}{Mpb%PlBc1C3U&%z>?vuc#=nK+@g^=F5x_ZZ4VLY3{=lJ_xOi|d&y z6-FvB36>XvRHHhPMy@(|F2{46Y!eBg7UQqVo-5X;687}@6Ts!irj$}YEzG|c8K5m> zxLL-L!n88~)md^sVO@L;3+wUL<1y~_HGonpC~K(-x$x_s0dOg}9rDRSEK2mfelyz} z`Lnxjtk}4Xk|4eJBm&uUi=?#sZhT@iU0L#L#!REru`rR!^xZveT!r~RuDCTkC_Q=b zED{c>&oxkwksmF8OD})V{eedt%J+mHh9|kK=}bl@+G_^k&Epb(`|I(1hxbKPIE{9x zm!*ogN2y_Esgb)a%+@aIpFnwv8SshHMGh)&o@2(lqvzF53>j;Yllm=wBhwXATmq?9 z$G^@+G%tX3`)Ci21&X&~AxHb7{f=NKF)+01*=Z~t4K4rv4-R#l!qF6%1)G>#1Pm&0 z^#RYGnd3gFl+t#G3CuCu_F*{3#Qc|Yt?lXDG;U(hAgG|L=4I}5w6T9?DgiXBL7kwn z3=h%Zc&q#t(q)-}$i4^hMVayi*K$`d-UF>=9?<0CfjETsQlj?$7^;r!so~8fdKaR8 zz1V5g{A(Z^@bA@DlL;!^+bnac({L~`;4JAsm%^5CO+{QJ3XgK7vR9xkev$40e714`yk6!Zc*n9-*khI;Mgv*OKiiiJ$SpYkjUDBbh% z_Gkwx_=Fg6kJEy+y0$63{H*P+Tm6w5Y-f-g|RTGpL5^j$eZ@L#I-0yP+@x(xo^ zVx=9MNL5v0h``r(Ehr_i_^Ujjlmrv*e}4f3iy580TTr4Uam%1y7FMf+4(wgcf>ZXp z$}qoBI*E0IAl(*%mc2AxK}^Y(^W*T<%T})aMSqqt;G4LVxVH&BZf*I;PDZPsP_J!lXO~$ 
z_{{H1CDqk4RuvG@2#qfvrNSLQppE2Y(=)A=9{+E>e}`^Tr<;7;Ti9~_Hqp0WqP&_Q zv_q;5wW!5n*cUFy1raKNpt_4_Hb{*rB5jJ`PIHA_MXeODh~99^Gk0lo;sTSEfVFv#(wN(z@Gj%LGNNFcIky%872^IQPOdu zIGc*tLeX1wXA&^*$bW9O3d$a909h3LWe8s|rV5@lUPp{6oXCPbPVO$Q$cjs{mmj9N zce$Z^7)I2O(Qr>~H3%)4$s_`Zh*GK2=M~YuiZ3u7iRDIVGM%S1`y$3KTaZFi{XO&U zdiH$(b_J!Pj>+$}C_dka9sJih?w+aFCz!WVPdDXI@RX_a&93HF-1CW4@MlWE+Y&F` zyOUG97o6}Vu*l5llE~@(Te`lbPPah)#V_t5Ft~q3Md-ltUk%$RtAfzetwzmnqan4y zc=C5I8X@*-(zFIn*KuZl*@p?^skI9O>LAg0=?DXfxS^{2RK&&#%~bfwlam1nvQe0p zb~SlQEIIutv^%$>3Gq(Td~9)aGInsrvqM;zo<$FGeUTUiTy%0^b6N2qhso&3x3~V9 zaP=oh{G+M1Tly@-93b?*OOu;tw)2{mx3fJ4J>UF)epD4Wyf90k*Bh?&^vqpJeTe3L zF44IEM<*Hb*zIb55 z;@`;SG6q#>v1V;sC^)ZiK0mPh4gc&P{f5*TUd9qFSTMG*p7TXYVE$ZUtJ+MkHkQ(RCFe;ky*1vO&jtp`V$ zx@X=zaKW~9dgFN6lghL%T*V&Ov)#2jpUu;~#{wn2E5#~1pNrGiR~Ii}qo`s^@sPC` zR-HZZmyEfG7s1FxrfyVKr0Npd5ysQ%>H1@E0b$v zr)*@wHv@`l+E#qF&}3k0z79M9+FiJFMDG67d_Up;l$ZBn!#~?O5J&<3cCMOW_^Or3 z2mL`UAWNb=w#E+-n6AR8(cy);)V~vrxigRG&;FcXpt}Y{U0}6zf%jxvP>lT{iCK}btXjX7%`v^I7W%_8Hlt6Z5)Ah$1 zE(RVQg8o21G%4L8T~F#Qg;U`!`IpR2l!!oY(ZT$)ut`E`#L>-dlRbXrsCzR&=7#5S zMZafC^P{~Q{0a@dX!Ll#2&UT-ofY3*_m>>Cepd!sR8#;Auy*KJ5&>hCc~hJjxMPLx z4Y`Kx#``brZ*0K6!;?%0z63l@pC-eS3jRCWLaM^tvko1a>u|#`POS;gL&Lko8;~w{ zSoFckvxWJlr9T=qC01n=axn)NRAaT#Z@IW)M=S8BzkQSTnB?MWsZ~g6U(^-1o^^UT z%zXL7v8k9m$~Mxq<1i;CE}eogKlgXZz>?E)>4%(;ruQWGJeUT8mcx3*$@So2THl~X zx58eJ)Nd_o6z68f3A&G1K_8HrKGle^7K|X42L~#-aC=5kh^B3OI}TiZ+-LGX=U;%o zvnS@3yIYbpsWg!-6j#G;;uyF)qD$%8BXmr)w-f-iGFWpZ65?>MeV+QKD4M8w{R-BS zno(1S37%0aAsS+D=Vhw8>u1z9<*X?j-eW!WfKkE}bPVkL`+8@1j}(Q&q&GQ7*@G?L zi86yRC1*k=K=@|z9Km>5vIv`o@W<{TcW<~|9(-|e>Ve`C4c6BWEg3?W^&g~Y*kwvB zvu66m_W*kXgT7iBEOqXU>OU~{A%34Q>AAk+VSaCPd+XhNGI41fIs6FTlF&rlj-*HH zg<)Xy8%^5m98^KlqXtdTu#ZTkWXk-7NU&mzimaNMF=6FS7w6B8lx~g; zhDrEC==lnzoGF8Tw&1_3k(E@b(Utx+9(Pke{&1I9@li5`y5D}g(&hw)xf6+!N6*oL z)g}~uU76$DI|eQB(S{qf!O7>6m_$%SXYNdUtpAliXo1xbtoSEl+Z3p?J1kgE%lW8^ z12yf)+A|$;ZW8f_^t1AeV~ek>)>_~k>5hYT*8S!0-xJjtpPE{06`?^c0fH6_ce0S% zwNJG)nYfc2_JerIxpRK;=_>KH(?RyQ79 zDqGBd*XynU>X}vK8;9HA+3x6ZKHF@9@ 
zkJSd+iFojxfXQr(>ZRi{*M)>=&gZkg;+h0KS~RUXjZ^*CWkS+DjZ4AhP>_1#J-QZc zCt5yyf^|PD>b6(x>+a93-OYMc`mc}sFj{c5z5%p`6~`J7JmxryDT|BLgBLJvh6ta0 zw;VlcuX{uUB%kAfEs8SN<5EqFq}@86X6Y_Qge$jG*x4q!7{* z*3pGeV-jq`ioIMXq5Y*xtzoQwh?JZ4ee41ak2WwaZex8M8_OaB3C>w&q$*lcpNN%`ik1^b zbc~AOt2`!>+G{$#U(_l$cn#3(n=F(=Q30UW!0B*osCW)^Cg zI)S1_W-E1|PGv)eR@Cd~*>0jf>n$x7shvl#5ygc&$4Y%1+!+*C2tq^{#V%zPHged) z$p!(YQUv8m!67&MQd+XiR~6_cQ6jk_5P`4vJNqG_&30nPiZrsovf4xu`G#r8BA)Kpqk|{@cPcN`6j<@p{Q#)nTjj}O6KQl0%CPf>vk_*3 zCs1PdDXF&9!GlSH^7$m*{xp@LqW5SuKluC?>s#o*5>@CoQ3@~fO{)992Y_>Bh1K2?F6fSco>0?XSPO4iJP5Qg+CArK<$x~OF>Lky<=Lk~K~M~R*&>)Z7QyaqM0wl0K697L)?S9v++&E3W>QxU~M8O=VyM4WQG>$(>T5fNd<=R z+H~xOY)fs>hN(=^%^(tpIoh!ozdEQ|I{D&>D({VRMX`t(fJqXCIOag)o9R9)KBJi_ zba!8$?PGzMvV;g&I3Vg+$`o?x!1xo5>bA(h2l9%}2_A=U1Pf;Ncs>lqxoAf9GkA>r9f2ZK+wdz)Hika9Xc zEM41M%bq^xoQa$^D3Hik(~{TZo6XeJ;awViiYcn9S8AJbjjDUPfOn2q<<#@b>S9gi z+#JLn&cvFHQ*?XDyjm#6w`%Aaxu0fK%9S){zKl*;I<==*=spJ9JpEQ}4?$3Hhl`bL zL2>OR>$$sp`p#FeCQUzg4(T`5s`z9;V^1PEX`&veQgiH=9Prh1ojY67fam za7rQW-R!|DK}!`0dveFmqxU7p6Bo}@o=60R= zCl!po*8b)=>TTH1DkPRv1Qkjw2oEHM%`xU=@$@wx%0dJ`-6^*~G!z{Q{v`$>TCk6w zTE{hVa(UI2QNEe8k)EBiu?q8B?Je`-@)WlAb5U>O;q^;@RRku+9s4+gETkPe6(v6L zoL>kK)T0%g8iEdUCQ7Uv0m6($+Xxwll)(*&cs(}1jxe9Ac~_8xzW=q?KV1u|t#=l6 zw-qdUpWn#lmDXK*9WQ){JBrA>nICR+ztdv2Hj#O~QHifL8S7Xlr^_9E=APj@A5TkB zlSQpEVfq~$_Qz1&Zf~u_n?jtSU3N^+7SS`p@Gr^?@}Nj_5PQ29K|j~|H&hPL?*N9m z*|4~qDUm-RVtAWi(h56C*#={!O@u`~%qP@jlO{Z-@^!Lb6$a@70x;s zW^}uOgc+5MSGzi>PQy<|9Oc49y{VITQp0fdM4i+2T-uh-liPvN-;`de(B&h`!mGEm zTE6XKM9?-C86a)67<;a0^J0#_&=(SPvxFgv21N;&St#xo$*?Z}mZJ`Yf{XBn#=C+K zcEZXigtf6uvrd6BdjXKqr~;COVk2DLwKKAK?Fo5CyuZ9ZnnPhpv=k&6=@Wr{c-*St zE10`@ z5pWH890ZM%=5QO(>2(1~5we0;2Pftq_6 zcR|o3I%4Pz*1t%H76JpGHU~U{>D7L$^u8wlkxYl7E{RCz#*orX6y+aoaF7M)*nDY= ztwuahk^P?ec@IGEdaF^bHv%u>y$QyR_$+QS4yX1d^2?$QDv*c`#x~uOEW=Js;DWFm zmd>xp6P(m)Lzdq3QZVv~+T*8A*t3{kvie@YirbJ8tLH(1MJ-$Wf3*Ns;?s-rP`P*J zWb%zSxB(v*901=BR~!uCWO&?hO4E>EpD1rJVD%(sM*X(J!*?~qDOSnJva)2At0+0n zQRsdCg4pz2I-tjKiV!l;O;VB?=LZkznrbJoP(9d9;|IgU=A 
zeh6}eD_l;W-UDuKHfQp-<|D~HI!;uqXk%f8ZK~HiIsyqxxzBhxxX!gp?AxebJJ^LxYR}pPkpw-RZBgU!T!oZA$w-r83pET+$`}dI1F!A~0{)Q7J&qko!4D)pK zKsKHHG!Xu*;XpqV5hqq{Gu%g}r6lRU0Y$`nyz5ir-rPdnIKz6%HV4FdWcrMeR?Ssp zZ_o3!+djFimGbl~(cViR)GN6Or;&SGI78rWrCz?Pzwvm5{iW|t;m_a0t`<>lHkb`< zfL2I0Ju$d?ONgPklkY~2C$WlF(P4JOhCPeWn`Yc?w>8$`05gh|ct99bc#hM3UWKRY zJktuUm5Am`v(z^N%|6pYqy+LwVyjMvhv|>v`P>>D{qud(y_{=;2)fCAi;dF6U&} zOi!>G@S;}(EJ$A;7abc^D(5Gj&W*lqeQ;f*uUi@7AtBZ44=rn#3ko+(s zVqMwBNpZWNlKqXv6m&+%KR6mvZN0~UdMJVtHgATU<)MDWRMiycRR5Zrb&=>_( zN6rzzj#t@HqDrq(qfVNMyiSo;ZBU!<$MnW_rPzyA0G2MRcz{T-Fe((Z!nK}27Rn5O z$6=+0!?c|Jcx$XLd4Jj>IZL{TAiJH#|^i~x&&>c8HJdF6bYB!i5M;fIgHG-1;uDbk@V0b^(_ipU`#2(^QTsg zsIS*3{$AHbE>2KR^x#Nz{~pcjA0v|^F`v@i)9c51iO4umjc>CWH%G&K&|NCO8#^@2 zs)8UZ4GIqOcgF4QlLQ355Qx!eYrJVHzK8ndMXXE)9nH0A$!w;vcgrP2MvI(Q@y~CU zBpuNVvhG?rQX78^^=z8r)DrMQ)et(A>#C}K_o?&2p@myFj;^Oph&b;;P7x9`|4t zhNyk*t?b_*M;tf{$0thD@L-yDR#A73;IF=*zFxhw&(3&*q%l0K)g}INzP4>M-?sgU zI7ze0$JIR#;N$<(jjU)=56$)4R^b!$*Vj52!3gf2wzl89JpzXC`LnwerS9){`IRe0 zr^39ssIx6q{oMCx{x&r~N_!ElU;)B+0t8z&VzkkOA7OJVA^|B~pDX6cNCbQ9 zYt!LhsVIiTo|1HKIbc$)$v7WZB<+x;bBAH592nQCG&vkR(arI?3cKDscQ;MP~PPxLlFg+eSXB$2dmTCthX}YbG_LT ztMt!3oGtz`8bG%jm;S?pk7QjBVZ;2pf2}o)eSb6~d8x!PQkHv`135`WtVD51j$>>; zzGJSBF_I_BXUMRtA4A_|J9X$->?>b+VDr=Rrg;r+MN^r@d1lKbxV0hjz6NFcP^xA+ zQd0xEXqa543^_39C9IjC-OJQ?<=SOm@D;c_XjlW}qvTz+nE*_@IbCr|Yj2^`Dqf{7 z7L;Z#-q!2+GZxZFzutE!9N<@lGaFj^6ism*4^(VesehMzQG^DD-71!~kFOR!7yBt6 zF*VP1)jxo_!Oe=gt9&9$8C*@iG{Bl=3d3W~I-!_dV@T(W_i+myjGCu=`y zSAS7q&de-PNj~^eyrl_NT3dmO*8MilK3>0qCa+J_rkD&$J~j{;;V{PZjNSx z*!p`*Lj8&-ckg%KBPkstZ+>MkXqfRf*+X1-%-Iw!AHelt4ialWcpmg+=Pu!A z*09mkl>c3rIi*mrdzxPVXEtlY%VBewh1N z&~2*nLUQ#&Jtg=r72PiHVx1>}rjKbWM*|9iyf);ns!{Z#c`f{$J1K7KMnyulBCGQv zWDkw}4Z3FR?jP@=>1=8|)f1@#q+@3;c`6hnhKSwwT}rXp+!OR2`F61P?>65prIMiV zD$=#Q2eOGw$j{IdGDxb$L4oINau_O?0V-94(oaO4G?t}N!fsh+RT93Ra?c+#hV&Od zqw+tOFM92A+z#`GD5$Lv*-U$Z>5TDtIv)&@c8kfAxj#L8bxCys;-B9eS&k?9#pGPh zx+!|z#0JOt9WYYOrE^gjGCYKs`f?5*9iN3F)ND~zO9o@2aIkpVtb0eeuj~XlcH-%n 
zkjS6$^x2xv#+(WBrWodogTf<{}|lrVW5r^E1RpWy-rL@zTE( z54Rkf1)q)s$0=2SeV&J1Wvx-x{s+1h5*Yab6)=}gl~*{Q`MAw1@j5+Q8!v(JTanb1 zHhBsuW4oZ=t~$gtMVVEEuH_sQ=5(3mn7g`4JcE&c;zTZY1A~}3N6pIM2&R)Sii7q-WiNhGQtjsLSJo!@f-ffeJjj)>+(^iU#!sM}^<{H~%>}KS zDKbuaF-+is2U;KUilDUXnI$FSAzH4sWa}Xg4xtKOx7nIFT2)Qe6)bbSRhr7z8T^w5 zB?8SrlW#N6%zro|XDh;>`Wpa=JXLE@m-w5PwmQtcymZ8e-F)+eG2fZ>&ko{r8~Aa! z$wgB~LXxz2oa83`h5kv*d$aCvIJ~V?=m_dY#IxO6^A_oQd(;?u7urM)5K`@Eu+w9g z7;1ZG8k>@3gs zOs-4nUK-x*%Aw^0!!#KMrdN8mCo8!U`^KKGoajTU)f~*iRqUmXqOVD7L(L|Q$YzWy zn+$9$eBTNB1dyQvUVm>l6Oj*WmsNF~e!{1gsIPR(%lYBy{`NN{Ac|N+z8RCG&9YJ% zKHrFZ%CcNvi|&K&^^Cf0HYr*mMV3B?GNetJVm#bFxE-c`qU8u3a*}|C&I^3`(@=%S zAH}o!A3O{e2s6V}q*;Xie!I6CA$%O3tj7JrDb+JC4U&M+GRt1%d>&b9hlkycJu-hxMCHuGPGe?!J_wak+QYnby|C#aqFW!!GGK}XBDCyEa*8q8#=f-BCj8Zg*c94%|PCv02I!V4gt zgTH?T;FSk4)%dlgbKZ`N{7vdd!!kjFq=y>m@Lt~UPNMTX0Q>(l0868q5Ygnflb|r& zHUxcp8IUm@KWi1AOk-Di{8h`J9yes%nT$;q)z?yUx}%4M91$69J21Ii9$%r1JrzZr z54^%P`9YoSTmq${yNcqxF;QD~W*+#Rlc$a{#fs$CD?Yzmuk-L{b0RpBV2mb`Ffb>r zOm<~uST=Zjqw|;bj4%^%iBXYgzmn5Y+-1RkP}0A4M_n1g_ioY8Z-bldEBEu!GqI-n zuMFM%vlAQ_29Hz~@>j6S+DwQu+Qi`GtH9%?E*AgZuDpL?lJSx-*fdYuB9&R{I)E~PqgHW|KQinbS^Rkxe`aK?jq>|bvoTv_YNaz%6wQnXv zVA)Syg?62T2>kbG-rp!T-NBscI!%`>=9e)ulnhWYA>|C3)Ya3HN+H$G{x7B!WbN-|%6C{^1 z7TTE*`8e>H;wAeake1v2S|gzLOHRsyVys8xZ{6eS!4QV)oq|)mGjdm}X}QFKN)d0) z>bC-?ST58^pn1Mcv}jDl#q&P?SD za5j!A%Nc$$Y0&O}+mWN?01hfKf;U6N9OG?z-+sZNeU!B5xwh92ECb3Gz@rL#4kJPW znO~`~7T4nf=$_*-hqugWa-~&~5Uw%c(?~J7RUThw2c>r~u{SlpbIBFaER4Al@oCY0 zsZ9GaZTtro`E(w5`jOl?Dt|tdV$m_VPG?st^64TKLupPgfSH_|6^D{Fc0*itCdX#H zIF+$2K&zPpupKGU3&YRkiFdcCZUAgJnvs6z@mc4!aPbrFqE@r9MH$tr&}t0Xhy0kX ztI;0=b!i0n1V!{bgm{6)xE>LHD52`5D-9*1r;{>#*u-SF%8_D*v@%Oo%tiwJE3@YM zueSrlj!%BMflNs4!v>%kP4n@@pL6V%m!ILgFoWU4!}S96ku@FV$>>|Bu*nQk0pIJpAY~;jD#{OVIN<`&-o~(5;IgAH8r*Szl{qcjrdFy^vG=n#rMRzx zCJ|&}d{%JB_7Q5+`FnjNb*80VlAxb4pz)3hMVnCV69h>%$8}u&zTk)}K&!eyyL9(J zYU*7O&`Qt}lv2JwE=Tgy!cI`FjIH?c>P3X4536dLnmyjg~>Xq)wNG(ZlrQ8)!5-LgFS(ACO}k7uFelc|`=9s!c6h!s@_OP@u^B 
zBYR9sw=HV~K8ob4&iFAam0APea`6R_|rEz07GwoF8>~aC;EKye(K~)&-3ABG#hWX$b^&y^djd4@!4|< z>OEB-{rz#01!H+btRZ>wvFL8L=3SUY*J#S8Sh+;Q0sLTDr)2hLn4ztnD@`b$gJ|SW zKM{0=7T+YQFF-Es-HAz(8R{?O^`Ei)Md{}dl}ov~0zTkxEFCz`lz$i)d~doHJw@?` zFQ#OcrzM%caYk*RUU29kLsHt@B4x$9Ji+;XxKC5`*Yekw@C8=cwsj%Fze~1BLD3Wg zj#2qNCHEs5Ec4{V-lgjc^Y2I8jH^cJZ~sb2Zm9` z4R>8YGPv4x$tvHQ$F@vV`)id9A*3NQSwcE*KllX6T}{~zHn?WC{Hc}qJ_Nxw_erhTO;tk_@Z6e z(ge5yW`)wj#!XE2rcMN~rqB5O@g4t;>TYeT&#(_qshVEU)K)cP+|fkFVSF5>P`!x~ za{+csO&xJdo2$u%o6O(jm8xO&ARh9e^) z`1|<+w|Xq1Rww^d{rluB8KX%jiJ0vI#;vsk6Fr9m_W)^NSO5gP=e;i_xUIH|#U{_2 z@%Vg$JNgWmF9_oZ>V3kj9ue5zJMdT(~GH}ux3Y24D&O5Z*pi?f`}=$o?;=q z{n?lLpkt^@P2`hp{{lg)tiSEdD-l>Fs`d;qI;LQ!y;Sb%07NkeoA8gt4P}-rfi%M4QAic1b%W5+-_ABxF<}guZNe~uZ zRFdhJw2g&ibrxZVw6ulfnB40s;0{Cg#$%@aR<#$jTQo17N)+1@>D{Nq3BL(kB}$2X zME@qAb=6sYJtud&6wlh^I%}1Fb8`v1?q(~ zMIXecF`+=MlI>BtnN2FF^&M36zz%`0rkf@!s-LB}#u6&WV{yx@Yla5^AfJSa=1qxA zCd%alD-0?%nh}4s;1$wCI0b1*()pu>MdbM?Ewc zhQFTx^6Yl2v9`^z&%RF|wQU8tZTY$k-3|&OqW>&>i<*Ws?lZ>@2Z8;?Gm=2~x&lZL zs++jlP{7~YB`7B|@lEtoD53xUdRPLk)j35DftbbNylYG^u=Dp5&C3C(og{n3zp2}~ zmO~p6VgbW$CG_C?!$pW}LmxDonB}YMU;;=_7JtQu&C>#b04+&TmiGjWsFg-2${lRm z;`Cu2;$cu=S@atg{AqFZHyXsd-zEs?Vg_(jfM+z_qw`+VU=c$ z?@w8GXf)gnC!;xdb7CS#S;Zh*G)%%apoR!J22!e(1IP7xnXiSPV$@5nGbCkR0RF zvBg%fF5^m8*}>osFNM>$@P$v|Atb^^W%%X==YZ7&0AwLV4{hyceeqd2CgeQ;eSau0 z6lRBlC>HD2iYH>oBi&$Surt9gsr?VoERh+z)c}%FJ{rkR%5h#eKT90vu@sK^WnC-k#rt~_1T!Wi- zne==3zu*G+zr%nust%wg+j2Lq%%r(*+W5pP{Cjguh#v0ESe=GU1j*b0IDMDGVw5%` zfSceuGXJYL8Byr_PKBh0JjwAV(8}47XKEF;P5>e9B|=i$S<={M@Edd5eq~E*fmj4e_0Tzm!)gy?Hud zU5MY*9<`1&awPU!42UZ3XCjzMfMs>4B%{A*91f@jDmo=i#Yz1|-wUvM0fO&`s0Keb0G2-$krIP*TYnUtIqA3j*xHJQ zjec{2YWxEpT;!rKfThekvE5TWv1qEs@5^0=RO!6ne}0FAkPpO&@%gP0F?^qKRE-J8 zHTcp$AnG~&s;-Uekh?u+PVMW|gEqr2&n^S3Xvw1NrdZ-Mq3&t`e=}JV+~GnG(B2R8D{8O{3Y|)4akw!pvrJRIfel9=h=2~ zMDffo-xIuIa&AJ@%i;q6!mdOixGJiuSN7}ZwbjlJsRm!3ECtQG&3DM$q@ zN&Cu~lc+0eyv@>QR!WPCA(s~+f#|O!CK?e{MX<0bOWc^A*88i*p?Ucq&@w~w*6avW zBh31O{HP<74FbzWf|wBzGyy>q{+597_wWTQ6dMb!1knahHDovh_8#gPgG+ 
z_mJLV7zY?QOyh6@p8JRHVNfFrfQ-d8yGSct+_$Ip@&bN;(IaS{&f_e&N&NT7SzkNs!KB#gKt1IGm&eYBN zhtjZj2#qiRqIC$(u00QUuiS`Z)+@$ZH{ zTTs_IPrMlA79lbik7=~)qULH;VRyx^po#eSeYk3u5`eD{f(=KWw+M8>N^g=m;LhGv zBQUoLloX{8f;H_1E&p3p9To5=AjYKIz7hYxztueA=6?g3(kBd4CR)=s{FU?Y{wBk! z0PWtEd}UsR`8gW0?uL;sN=0`<2)~OUlu|M+ zh41!HreC?R41JK`UGl#O$%_bsfPcR)i+uwlQ=^!dAcxaD5S!xtvu~Odxl_G+54}9eEy=UA1UJ|rZemDF(91pObjW3SpY4sdVs=q zX02L>(jNEbG$9KPG4do{PaH@GzzDz9J&M009uxhi1{3u@5fp9JE1n#fbQkaAJ|ZX? z4ru{?q-6TpLVj|+a@*Sr&S66T9F-RYoVndPCB#uV#x_JGPYD0%Re9|MT8#-_Ts}19+AcA z_ui91962Q@{bCL?A*;f+4HoyfzqR8FDF2!H2w;@9v|jkV4)}i}054*^@xpsy@L1#H zu*;t2qKWSC=M|%g8!sIN@l{lm!*>s*sIZ<=y*5 zRq<+~e18Wf^Zg^8_R_10nMnWYcW>+IfB+TbBslb^)1@|~mxGJ6(Cgs_NseH=N}Q1D z$M_FoKEsXyZKk9JyzuWq$vy!a1TaUG2=jmpJyrBI;o~vN5xG>Fnk!3Y!Zd@3N7Tl zuoG4_|73~zb1Xz2eXq%DRMvP6xcwqizg`LflLbz|nE7u4SStts^ZueqxW;XvEhZcq zAC=!LYO4aqV(|UC^2wgyG`PVXLKKbx!N3=A3CjBo+fx#!j1vx;yu0cTjdRtEF#>sn zrb?fYApgtNV#WEqmt+e%LA&=47^s7Mv3=o))I&sI1p!daql^IVE7s6<(fU7w41?b z+o{ww(wzRe-{|`lNgKvVUq^P6Wh%3_=}E0fMn$Mqju)W>{RtbWblm_cDfF6 zIlUMw9zsRqNKZ*;(6n1Y9J=V*C3hXUTJtcc5iQ?HccnDHQ)?~eac3ONyv{&P?sW0j zP}`G#AHRr&Xb~K@IVCtnYid$(ToS4$@s=R}t&&8N2rzC+;pjy8WbH9cA1ZvA%|B|NBWv%5|a>WL?J7*=+tmUu0 zhhpx_eza0?=SaxR3=SbF7J5irG7tCIv>i11dzaJ1oV5L7plFDcjZPsL{t}`QE9vJ^ z5S3+%jP=26iR=uFbC3)|3K?YaQAyX;LM}bADz6EG>b=t3rX59*-~`s*poXc;=$J(+ z=<-CUfx&r8l##%8xqMH8rjSdNwR9ymc~BP``ET|5sTR^}Lf^)KVOBrhWyh-beLxd7 zEt7S00^^O;QX|kJd@9*ABRf0RC`$t>2Au6wL_ePtk`%FM3v4~TFxlpQ9SmYJwqxxS zT#8IJY#ri$L}#+UDh9UiKh)7(__0RXEt;1bczylMoCkka0W*}yS%EHzlSisJqHH6< zP_wt#N0l5OD$SWLmTm&PQy7~?&ID=XDe_^{*pk-V%c|oi*Xxs2{3R>r$+fy@G?g0V zk3YNbrUz#f(*NFv;t4QynalA#u6-t!f~Th=bTm-+n5eY^TkWsZ1ynBDke1fo27bTj z4Q&_*m2t9Ir|hXH)J`^i+)>1Y1jZhr0Vgr&>pkK*x3Zk*+LfwEm|-{U1QEtID@+&!Q#V-QSTd6!1alEe}J!qVv!GA<+ zHN(YeY@IV97~D<2D=X#SiD#bliR|Z)X<5?eW{_VNm2RgMZ1wJNmsri7;}u zH~pTsh{cK&;_b$Fzf9i$_|XH;a;b;nh-6bo_twA8i z!_8%6mxV^0`zC8Lnk!9~Q|bPmUrs7rBChoLtk}S1K=)U?NF_!lRam?vd>C}kNqh!# z|2)cDFZHjDtY{({TApQGo<#v~{lZ^bMP$!zApVt3~$-@P6d533iQJ6zfP4$*7J>4T|$A@cEx 
zi({qE#b0X*Ob?S$on!jbj_EW>+Ck^5dky)wQPc#+4%)(B<<8Y+a3y$RMD5&|H&L`3 z*yam3!Z^x;Ls`5>8;gJqa$EbJMgYzr>2#$w$9$EhujLXLZ|1A$K~z#@EC*An{mLu- z19O=UDhnH(_@8P^*IhCZl=gFk98bT?F{I=A?k8siae2b^d?uB=eDh=WgsiA2$*8o& z(fR9aJD)0}%2!tQJf&lg7N^QW$Ncy4y|2*0FBiT_$?0Q~fpt9glpd#ALJz|vG}j3~ zgDHHg%02H!(c!H}aXIW84yFRHdS3(TlE79kuTP>Fgs})u1ZzdQ4tNTBOtYXSmJfsx zJuezl-G}7C+p_r8S_pH6Qes+5HJ`>*p08(Y7ASn_%{yJJLFMJnZn?s7UpKxrOLFKN zs`K&boU1AxJK@oe*6$9>(2O>XyWbm2eoRq+&f?P3f&t`VqX zq$J;RZ~25UAtZ(7>Iui+SvUAEi zLNg%%z5q!yq%Q-^x$c{5Jn7dwX1%&9bZ){eum)Dtioh_}&PvMB_aBdp#@Zm=Q_=jm@3T`0W&}Chb!a~s$uji! zhuoWOcs1$;WAG%P1=-e>q{QBjbI8zGOHP(;SGl@2tk$ce$I_ zWLLgFnald+OD9xsj>YHBj$4pBIrSI)ko#`=4As+ZPPT>6nNLg8!y&wX**k@x(UdRb%Xx^3oG8Bs>gPrqv2sOp7my z!)NZ7>eVCB^CuX~dgtLK#nlE?_EQAW<2Z#V1?wRAlCL4h%OYdSUZ5E_Tvsxsw|mt; z2~Zl!aC*o)_1DyON$Y;`3S8$aNAha>GS?QZs8j~nL{rnHRS>_X5K2QA0;VK(+rid1omzd zl|1nK6w{2#w+Cyh!pQnK# z=l%ZuR6s&a6N_ey#+~O+JS9K1hoke^^bVtOV@7iwJN;6 zW&pcN@cEsF`0n1~9BIXUAkOmJ&sW?_qW$}+;>RWOHm|0!3`e6{<6XPh)f<+%V=etK zbw*Z8oHMwb5v$Ew<|d(7PW$!u`NnOU)2^A>&e5EevxSAx@+_FT>YHt9bR4PNQv!@0 zUb%UI6G+GWct8?HE<#=~5{2-;8saL+x6;~mqLJsDxnpXtPZx2wlblNJ-%i z$gjPba-N+J!cXpYgqVJqLtuz+LP7(twb*l1`&blT?6ufKI_!1R(;fJNzAVwwFrc@q z&=YXvShc&EogIyR*o<0OHvS_UYmZUwir(5f&T)!cXZ`oK^YQtH%`{6Z{R3++ zCi{;PV%cn%nS{FC`sk+?qQbFlw9>0t$f#H_0Ye}_SJ#024dawurwhY2l@xDLRd@aB zVNp{VucIHsQ{7CqNT9Q-f9*LtHh-ICA7HlvAY+ zK=Q}pTK%}bKIQ|aAwK>1B^u&>%dB=@0WEoDY)vR!h>sdWkZV->y+HL=+q+aI^-u4p z^y3eI_0{L(x67VOg9?SsL1k_&isy@9wA-r*rzKOP|lvc#-1?ZQz$e$ER+O+2Shs^OLtJZq7=iN3-kMvYr!g z+lT$ptopw}Le}2g0BnpI{q4Y-D;UT?0b^Hl*qjCvnjIuNE~rNh;c=RUL8&GV22WQa z;04Pjg+aGxw!Z&SPKb?A^hINm!}B%5`1lm3dMDMbvJu2+KPFHu|FT8RQpAMFg3h0l z6qCLfc*s=iw8kXk3po{N)Z1s)kO@}-8$-b`4h9`k$bKIhLNeHD!u;aC?1w^N%cvQ@ z^WBQ*glg*ZZR$G#ueJh0a2=`HiIjj-qlIdj*eU(y*|1s1ev3T4 zD$yfdL%s|NgN;f}?@tHeLwc@@sSn(0sAGErWmCGJ{gZO04R|AReO7Z2@ny_=pP|9I zzQO_r_iFhtW?cDow@fZOl9#lHwaQx9xxj@LP1!orW`}wtIeWpP9Omc51>soXU(9|x zCK@p|k17UN12`VRfn;DlN6Pe*%Yr~35~Ywh8FcE*i^RN-SpVS_eyRhYMAntp0j63_ 
z&1559+AU+iPhzoe7>Hr)d1L-AYl=8ydu?f>Sj<|?*t+8HfKea-W2t8C zA=UTW+jW<5k?kGf!}^B1eTsk!?AHpjq^#Ms!P0@3ULlk$$kjm#&KAWBYfw;_TS`lf z?-KT1jr=9Z#e4;>hxbdUP~l_eAh-CF9fO&pW$(7uclzq{c}>O5<4kVw*{NgGb?-d> zujfG`BjYzMF9mJ#e@ESp*1d1@JXI`d4_cpmrFfR4c;RCG)dp`SGZj?jQSrl#3;Vxd zC8vRyjhDvN!=%4TVyH|9lM1B*Q*OdPZ=0394)?GX?Aa8Mw-wl`P>CuqA&OBPebA=q zC@^zBB`K**n3|rp4pqr^Xmkk>L%sAn{?i}ML%G4bvgIEA*wY`O46FqncWO%P$i7+1NT*lh(g&dY2pJ4vd`kzwtbU)8AdIL*@=WsdZw^G$0x)G|Bl2 z@)JVFC`ShC&`PkTTHL}&K42%zev*?4CV1pjS;!VhuJC?RLs=XDp|zr8ofw4Ogn$pL zFH_sb;4*OGWAwD5czI`ChFRcUjw`>x=<_fXSNPm{v3GC%~aH|GaLHnImEZ_(V*;n83Revx^yEZqgN;Ied9A zv8DzzkkL`Vd{U;m8qySe%AfUoy(=5X>EUPrkCK>39gDCONvjU!J+~P=HC5+&5^6vY zqxey_eJ-21(yz9!ZtmX`pO7%^xq&6OX_)Ye680jrmXlvMRlYti zwj73#(+vN{Z(hUFrLIK7Kbdf$J6bvG-vwJ|PLJ3^$A}sNu)(Au?VHbUmC*Z7C{w1W ze3{Lv2u()4+C76K#wj)Kg1e{1_Gg+0Wuc2$UEQ~)Zk`h#_q@{UEk>(YU=6)KmSD2P z*V|q3Ps9>u)V?%D|E+!X${aVZ{>4$y;m*%wFB|^J>_V{&6H!ISoA=h-3X}{Zs&nz~ z+GRT1v?Zv2k;$H!Wj*%nnP;zOS^vEuw0cl=cC|y6%TTmjP=}W6oO@vqGvk0x&UhG!v(%mXBkx}ab5C#B(uo7XvLFLUlOW0j0e^zzxw?2(x8>kJwldv!|j=8=I2>jct4JqOS$NMC%4q{-? zpg6Ng5w`?t1?^Qvl16+XfHaJq{z(Xwly1w=3c|ENi;Q#lT;g`{hf~g0<^H>neC6`x zf7;H2&?1^ehMZxHc5hD}7hq&ufGz<&}q? zQrt*-VgkD2FBzs2LLkHxigUz`^L9e;>(uYQn1(bcY09Fn%Y;48!P> zAEa!`uVtTxMrHFa2uo2$#vOiZZeBl3T1l52jd*KLkWY-m&A;l#lfFCT<46Up5DwzW zULHQ5INq31SsQ6Kjw0qL?jOVK#5VlviH_8h*A-_^MbpB}x@K7aS2Oz45dF>PIbDIq z>tn9b2Ghrp)U)b#M3M*69ShEQ?ET^J0_NM*i*u+suqn#Ak5oaG*s2D3?TgFY`=+qw zCx)Vqb5b8F3Ht4upQzVM@YsBWo=(GeglCNw z`FDFuR=mh@#R1*1X)ow9CYTAM{tur+gw$!_(u_w)b%c0{*@4pr{b-YE^GvvtgujM+ z#7<9+l9TJqjG|SO4ry>Es(yY#KeHO5rU9zTPa~JSa|}YX!bvuUVy-Mnw?DOtY=1St zhyIe%GJcH!L!v)n9nbxLZr^%lPwbE}!!k7IJJR&(0jy6~7WRv&OB1_4dzCg0h$zY^ zO+X5U9{A5LZdkf=ao~n*yw%FD-OdEtJNCKt{KpiQpq__6p|}jH;gX{(IfGE@;<98> zX4vOdGYimn?@!GAB&deiuGpPoo%>0ZEB9i|{clBj;f*kl$;@BES?Uz`$D49Dc762`S2F?mw3!myO|O>-wVha7sL0asMb1heS&Z1^s@|0hL-*qL zQVACyrpLeMm$SYlICQb|JH&5WJ82ilJN<*k-E1U6aMsLJovhjJ3nZc6zp$1|;*=z(3<^G^GqDK*+GW{kK#=D$4=+K?ym z$6XvvlObR^cYAeqJReQw*m2~93-t{cL~Xo}hv_b2;w+Rxw! 
zkSWG?wNCww*Y^?VrJ>fvPaQ-B@PiXCV@&_|AH;!W7@9tXO80|!vSUkz;>WQfu-BcT z(~c%MqY$y#kw~Up8T<`xgk(rx138@vMpDUgc(hs+CQ=d$e#G{b|FsdZe4!6_Rp%oz zKYd3z#5@sw1qTl@yvSLl0Di3?@I`>6^qhh}kt~8csoA?9rRR!5J2=<2cc%fUH9;XMoEU5I}_x$-@jvqg|ssfKs28N zvL#2oqLuj^w1VJ<bgbb_nAO-NEKUc!4Y0l;NZ%c&f*bVyTTbW3^NLy|1ATooBK9{rnL? zIyM|{Q}Q*%1sB6<%^8!ssXP_4XK*IG&)E*CI4kkYT@IcaeS2V4)QcBEyKt>~_i)@E zwi*DH0=90)u0-UfTbr(YH+1u1dr3~%T)3@_6z)wuPr4dKgv_wvHn~{-IPn+AZT zj}J?n41@YOT;J7qY6RkN50h;38aBDVd0qmm3HO;>L1j;w;XiKLl}i~Wi;O!d;(&WP zuE+gHS!ENDj(s9Lh|m^CN#0*JX&RsVMLMl#GbUcUz$40-D09DSN!NdlAHV3GIrb@2 z5M+A z>!bjLawcRO;#n;;p5o_KI^V7Rb5{a_JOdJgHdaY~`nvP1T z)<>}DibljPB|3DBDbXqkfxnY`DV6LOPoXrYeTXH~v@o-x7XPP4-ysE$F50gS3sj#$( zl><@5;5mE`jJy1R${OV`BB72BEN0sKzbhCRr_mpkvCIQ=2c;~3`BaRJENGc0p%dgI zbQys{-bVs0-4|ZcvQBRVITXUO^Q8P(E#+nXl+d~OU@ho)scFKgzCt)H7H4Y5HWIFdctBR!HMlaa3;a@{9uf5m?=3D}l>w!t|o`f91l!N^6!AD--lNGA40 z^2diKpLv;jaXbh{)*0-nsOjMk@t=6ypnZ}b4%CtZtEME&JPeHf_P)YRvg6ES7E$($ zf0TSeJT6!jD|*QdqUC1B*$PKiK53Y-6JBgAv^KA5)fre>9{SwOCn;Y;RaLd)}iGmcYNYug*WBF48mXJ0ZM zOvmYy!uLD>EYWS(i{zl0u>59gTvEG>nT!=Tw*7Hh8)reA*}GrTgHddh{4XteX8_Mh z&RT%3JUM4By>>@xT8zan)W+E8E#`M?6qX{s_`I9vw~w7AlS?@CN#1BGkmkHpFBJRgD70*s&}*?_StfBAX;z&xv9=AS z(rO?0{o5};b}!mbvr3@+kGv_trGhMm9_U}y&PU6vDv19b*xVfEq6ziAUips$7*OQ% zRN_EIJKrxw?SlS^;=~%bIjISGrbtD6{(he;q+h9cZH zM@k;6yh2XWV+pSpzDN%i<&)#zp#rY7Loxa5;bP|Mq454793Z?xBn~e}ZXO;!NU-9# zeJfw`Sg?E=ySPHz7Gj~kQ>uOmBM21ny`7WV*zosPJKN$JcE|!na0w3)wMj@kVoa9I z$0ln$kHq2t#0!Be|ApVaO|m(l$v?>;;w%9RDQ*16A*hQ1~)P_=%R4&jEX{hxZeIY-X2}2}u)RmO3^Gk82w{St&#om0V|}St2pW-tk!T z17k!H8x1g<>Ib`81O4G$8fD?kyXLYAEqC+lP6gAEy&6+};oVtdPSvi{=@m4y{yNr+ zyvM#fR;{1PXQ7Q1I0WPe(EqAOISSOjxTVdyb9Dvw`14SThxIJx!t12&jn)V!znWit zOdW?0Jxfwg9tcnP6g>Td`tPgi=Q$}?(qn4Hi=^{+8R5iEMUK#+w|sT9BP&8G9lsfc z9IBhb!CZo*|B9wA?H?df=e)2OZL%q_N$cy49|Vsb*MR6Ry`4lO@P-w6ll)%0PyI*S z1K!*Ewk$!0*-;4vBz&;fw?^aeYt)$q$9C7ks2=O?qGHdUVD6En3Bp^x)Q89R-O`WR z8}Kwfwc;=956Qdz^9zRay`t_bjHah! 
zy6y!{!(*!s8p(_#v7}fqX-$$?*C&%d5;#O#>#p=9)!&QraKpjO_lQR?13|6Xnr2S6 zcJ1R~Z6xH%ao%&q0vjC2jQnqIvF)ZBxXiRz6r#42IwG4ez6Ke?MDHFRw29U;^+ znVQ+TxuyKami05`1qHDp7K*uAdQ;WHaZ3b?@G;OMv1_?GS}X(0u?u*GEYyLLQpEZ= z0g_f1#_;cYeS-W4^5(e_G7tSo^lWvJCr16rop_w`)AlYo#)<9O2;0TqGE=3KN|@Y~zemT>WwJj>SXM4{ zNi@OIl;zC%tM(OIV{>BYQO!&K*rjA0ULx}@CV~p%z&x?NoJ3Pf>-fuwt4{dYYTuWz z_M+T1d&H;<-iTzOhFf6rDq#h}+U`}x?MKh3ha3?`TO%?JVtD363#$npym=zy{+OAj zx#*Yx5K4)4XQiEtr`9U4NNj|DPaXzZ8#*!XpR*O$GrL}nP&8T$QVN8nA><%FAYCQV z&QFKhWS*=Y!prHz7S1=0us&d#49AudR>u8O)s+f?H1=u&Ii5z*n{BhlY=f0USgQjh z0hC%nkVz=lMplQlG!F4#SG9dJ3 z;s0g{_ghN^XRZ&kszWaS|iB_aA6bNyoNrb<(lzWXI^(?ASIswr$(Cb$6flo;${SzwhTBBRl(f)~Z#jYF5oz zPckC5-o6I|CR0+Zj4~}PEJ_tt{QY_(xhBSfrG3~==ZVz!3;hQon`%FiG2OvW=Z?r+ zfP-evLn3)UoYmwd?O$fGpBESCg&?ZV2fWBZ@YQugXcwK>H3kQ!7@7N%S5xk+oa@fi*B7wom_ z_)3msq60+>;ZGC+wDRpEA{LbmrNBu?F8rsZmkd()8|o9l@x^|!(~=|m?6oIBEpmy7 z8uTog0}`c1RZ7|XUl+sLhh0!fx=bYf8B(ZFl+tyPG2!?LYi%8e8Q<=9gmm32vh;T} zO;$n=*iDt#w1S>0yp<{DgeFM}(BG!HT2UjeYZs=h2G9#%$2(P$jy}D`CBq zqyk;kTJG9m{I}@o&a=1OavE>&icC0Ee#E#aK8NZ4R z*roNTqz*bJPYpR$hqK#_QOMAPCPfO%jy$&an?@{i<)sD=V1$j4dBT6!Q|A@iCtK32nw@C?D1 z3-w{i0-I^|n8T44hAGShgqHpD7M8@SYpPrP#h+eZSB2+Bmj&`h)8mLETErNx2bZj9 z!VbS`E9gJ1h5rOOQwx-LXREzdFqHo*n1CbE^%nJzoL_2scbjn_V;IX3_iIW0qzTG% zNHbQ2s)ckck$uj4eN3`YW2nseCf)T#Fq~UktQc}aMJAiRe+!#X{J!gLfLx-Hoj?MC z?>+}(w9{ufmk;dWw`x37QxosibY8u{bl9R^~3@K#|9@Y@>>&QZP4bVsMxtCk4lO4lEdjbRjue^C3 zfr~4xjg|vusx=J@bQ0uMo&zG!IYNVDP%&2bm0@wJEhaMg8b{?Y>p3 zxp?4P*7{pHAdgM}9T!`5%}N4*Zp2_keQ_;-W{~msQ#T9%sVitLlt``I!-Z48EYLTpi`0yh`5sN%V{gyoV2F+txo(e8M*}a66_($_IWO+ z_e`gne$Y^?yXPl+Uv+4oAI~IT-<#=(>MbH&H%*a3!UxO)g>D)k3E{NVHBUy9zH>5* zgn&EOY2%Q<7=cz$t*phU@<-q&~99UNj*Ssz$ zx69^$z}T5w?+}-0kg4;fVFIlZs6A315Osg@U{j z6!6xBMz3T|izL@|jw2iD*G&H_mhBv$VgLE^crdHy9R3@|2fY2l$?+7I1Hdi%SXQ(P z=}VhQPUrmvz!1{sOZ=vAFWptg0|J~t$r*Fn2du~SZlBAPD=!pJ{Q-@Ly=G=LA?0zd z`N7;AUM_j$3%Na2N#a=}mZ(v-CjZVem+v&K)Uv^LOe zijq%4?iat*b-Ez2BoeGW0&^TcEkuqH)Ftxv%syD)VD?C0w6*2vu9MyT&sW@I>}<rj>uI|KXEgu*l)(hU8%^^ 
zJ0SduNLke$&y~#ZaU;R!y3Q#hoqUSYp9SnT-?joDl{?Q_2F?ir&LA+(3mqq;(rZST zU)N<94}u|2!7}*;6ytC>$`H}pbC4y)_;ue20LERNo$((USGc*lGID z15CrG)uB+TF9p^2r9Qc~B3RE%`0Vo<+D(yxVx#iq+=**)thh4GYR< zY=aY{;zD6Dp#09ZO?HBPGklx+W<>dM)P=L9pC4_Mtif-nFdyJOD zwuXiIS;G73)k9?@mPv`C%5O3Ef$lVd2xL0p7$gV)sNGuz&c9m>0ppisG-`Qtg%KbS z#z>|-t$YBRu0uD@$il}AAMHpjMvGkiQw*NK8jgkQX@fEtMrK%gO%%spu!JP`qZc*= z+HBY3NDGG!C%@7mMSn89&_Rfn4lb=)dGp`3QNvtC@RfXG^Up4ciFIFJj;y_jOl(T1 ztUtU=g2Zi(_E5%o-#w2JW%S`ZUIlF(N)52)>$1s_&DHAC07au43EVo;j3^M4pquwx zjRu0+b~)sOB5dBi!ZSROwiPl)V7*=LMDfCY-YiLzj>@wiiqxbB*p2~&Nw9|u zCI0c$ctO>Mt@DuIn9u1XB|m1BzzfQ4gY8%@`H_4HxN)c985-2BL3ishp9 zwrHD4Jnwf#b`t;_>3N&wlb#mxBZo6i4fy-!-#{sC|Mpqx`Zx-2*b7g*FQWGLQsS#i~}G)@o$;KKe})_SU5Yn zm_u1f^e&e}G0Ky*1_k%)=m^C^p3b3>`$B4a3Ta5eZZR3|l}7Tg{*h{M!NXG+AWGXa zY6J78cog8j7cT=oi=&M69ust-t>BW~9p9K~KTQ~yW zUl3Jvduk8&m;R2>*!?GSIUh`3L?3xTG%S7w{Z2V?imG3}eNCs|urXr}L-;2MeKNY^O- z6IyG(UOKa*bj_LsHryFmN%$_z)v}ap?iRo2#2YC-z6EM8ghj!>rYAaIG|CI(cfFX( zQ3YoZAwz=tiwkW}*ZdN1DMjQfka)PVyV&G#WV@8VP13-*HJ-{w)HXHUQ>T*t$7fq7 zQGDNU(|)-<80;r99YwRsPF+u?k_U$c9f=FG7n-V1<1}ehV?Y;!B&}50ylx_$owdOHrqBBC>Jn{F7VTgy2I)6H2*gNAJM_# zpD3u(JYoKuDPQ3e4%gf$)c<&kYiLmIp9FY#yvtOy$X)kCa%qOC=Gc@02E_wlh*-+T zlYb~>hv0tnM+@$*v+!F}b9vc1}qqLsCejg9MYloa;G&i_sJ-8|U<`u{yPJmCB* zA8a{JZhQCSfO5=Z7Lv2&Fy0R-J#{oF+;>J9LO^6nC-*40;@CxT`RJ*v>4EXPNs7=u zwizdMipvyV?9;lFA*eZoV4OIS0kVJIl7494-XL~qSZ)fiuYAy<=Au8#i$V19YO23% zxfbj;%bjFd6zvuYJpBi#rx=9srVHk>qoXEI;j=s6&h41XFNVeYR=`4`K2}2KNwyw@ zRw37*!<<#6d7L28HPrd&2IQSjDk8+eBl_p+VB;)M!pCYlsyZz0X7)Zy1s&%Q+MDWmnOxAfYkV%+Pz>OHG+w0ww$gY|O z_E1l2!s~v*V1&es1fkGnz$v3e4^4fn4AC?i(zLV;F)DUD5VOIakOCsf$|$IS`+bgZ zbRHo;L0R{PdthA-UTV;nNo6&oW8;4|&+iIJ)5SQ`b?0cQyLK0~V!Hn~_`^xFGK|>q=pA4D`~1&2iYU0hf8+c6T_KmO z*GmYnyHzD9bO9BkW_F9@?f@ov{W(H@$!R_=%8eew873S(304?kH|*w~l2Ef~NbWXs zh8L~qZrFE~O|_Nt8WaMB_tF;_^AkF(#hxztCoSP``Ll)h^-j5#S1gL!2myar7Bf(_;&ka5LdJQnRB1IT zS#WG8G5L*u;|REvq48LtNY+I<>11q#1BGXxIEfV>@1|ls&MM^{oMR(p{!;2Xh*dr- z{-)Ba1+BaM+lRzvIZY%frjlG^+w%cx0!8Nv4oVy#cr#lV`?jBuqoIz2B9V*wnjC?_ 
z_KHcJFIMmS$KZ*mEmj<6MU=xwN`q(u9O%}hElnqY|+*3r)}>cEGD;F>odTF{1Bf00Da8V5UbA3YTL6 z4y$1h9VOYcOgM9Q>)1af3pr$v9~aXRT_5UGfL9AW8d_F@`Swbsy}?rYY)@(E2b}^m zbUiILG`;;qeuT{R5YIJt{jf?PxgILfX1ye-{U${BcFj(nL=>J7;be**)=%~)aylj` zWe4+SY0t<^*!63_*}S~e#R?`@aRM;>0NQC-clHoR@0^u>2!2r*K6(4E>jNgY?g8Sb zPvN41euDj-bz9U_WtsL+2ho0YqG=}ZMV&Mz|Q?Q3wK!tokKHYuxXc3*)Hi4~nIb4tktfF6&v`2_lP~ z{9HyF3hxt>uM*(A37c6CjV!1#N$C~w4r)uL8qm>8lg^gL4uG>A(E-8tdVeP9wWXGH z!JswtZCb3GAs}YL$})L&p7q>HSN*{T|K>mBrtoOK zuheLyu+^qSthEi01hF@RWggOa9H`*Q)?av8ntB($(Hw6yx;SsQ{+QG=(ur7ZTvUd% zNcKcE)!Rtk^;Z(^E_+^$OxSBXsuv*%j45qoE)^~esw6Bjj`z@nlg^$@-rk$#)97o) z4*+)6RhoS?sApIZLx_$5nK9pvBgkSJ#&Z{hb-p}p=riZKI0PWwl|K!x32ZB1)|0b; z;x#``v=&q6afnbcfU%rhfHQEeBq_WX0+cvhaQ^wmauwLauk7iW;6ru9|CI6b{&7v7 zk#yc<8c|%2&gXmtUW?yH zs;Y_W$<|E%jPRb1g48$o)4xqaTBTSB)895cO@gsT`0sz2uhLG_*QzA=K*Bfrpxl1v z0QI_k>5vcr8hZCFQu$J!xssO$0?Uf5sCn7fg#&k_lIeZW19)eZgtkp7s(yBXIy)QHDN6SOrxFp@4TW5caJWxHTiwA@UMA+ z9gOieenWSad%R6c}%_1YKjjca*&kbXT27k5fnN7^19!)~j zrfdFoY0B>ZuwH6j1?v*H@DB7)Y6gEk`de-6iasl#%MolV^`0+yw8Bp?8%|RfbR|QV zb*blF*~7Uy%OzdB?J{;ZT^l*{=;SQ5=Y?{pwu~{wJzB&|2TxGXN7JW@5z*&aYWq{b z=HkG>10_kuCI1H*VUmc>PfP+@GU|x>vEe6+2pkaCNe0@4)>b+d>)Q6&G~hzXM5b zkw)uB4(!}9Lk*1<>_bzX`Ujk!zK@e&OwqfZ&vtIrr}FV8BTaN1El&XsJ?sNTm~+q| za6!{x#6c*ZNZk6MK|!%d!c@7bm(y^mrZqkAn7R(lCL?mTrx&l6K35kTmlr4D@^$+p zn||$W^{jTLK5abBcaIZ)a_|CjQ2M*BlV_E5zwLpR>_d+>zz66reGDn-^JXT;Hu;TN_5CVYny{@X*S)a@l`vfaneiY3RDtR{$7I@DKNacyGDNhN1 zR0qFGzQ?>u(`M!K50Yqg2LPxpbjWFoYPY$#V_2zYp1k;LzGrO$9n9PoZyVW31 zl>38Fz3?ELBVUtvsjBtMkF^p7Gij=Q)RAE1zsWqI8jVUj$0iF?{XL)B*1G<-JoJUE zLHp`N!ev7c3PY^6*`WhD*e% zzD00+n5pOqOB%htH#m{1Ag2cty!}s~uR!3R`UysDFWPTB#>(0j;nd2;Gd{+DECpt` zY)(n!WC}_{8rbCIRIfEh$H8S{2n?`UL~Sj(jlKHTRlS78#1uK4DCT!zjPX*xp_=o9 zjCK1+U=}x@KAQmd@yt_4|&!b*cI7ON$FXyxc8a;f5deSql|w&V@{u z+n|iM%Ag_DX6bd#wHhuL_H#F#qgKG3bf?j6O;>R4=j5dq%>9IIhlNRj-%W1bWK1}B zSEMiTda=>!((Z$i@@epsMLG2=$=WxXcjp5_YOn`@GDb)YX}CxNsFDhSC5)&tRY+2J zD<}o=w`kcv->8Em!$e1kSL5rbDwu=|k*Q-8<3x3EKP_}*CBiBG7_&AxGrXID;bs*( 
zy?TTWR1K^|Rl-wEGVElNXKem**sg_VshDgy+fWrwM`&tn1y4?_lC`vvVHtGc6VGIS z27TzyLcLb2ct}LtOgj=bSfi^yl%mKP7EzqMuVPd2%izr2q6rHH1swSbwa1 ztjLC}BUvG)xE)RQ#b`hMcAzld$#Z(V{zkA1A*=~a;$&EwsZL?hkJ;e|RT1Z0wu9`P zA@$E}0;|npPDi3@I}6VWez9Z>nr=X0u24PQJe|pm1)ml=3_iEtZ;bkFXPV?_d@-4$ zNpqTYK5xG9i@CKC9g1%7Rd^7avfn4gq%-qz`%zK%Q=S2mZp&2a0%JN9v9aAD;q=$p zYIfZLdNQb4b_#K;Xz~{iioVjlh;q-x>M?UV)}CG6RCiMj&kw!#+q*Bvs{ENIvL_+1UgK@oJDhr&VxUHKM9`m&8)ILjkmxDlxKtW%pljh3|sP`av^+03Qu_5Dh^8`{5gV66S~%Er%5Dqn&cI+Lh|4cam~awf3Eak=Pas zgH=fS#V{HRojjX}BTbrXbLz!7%DOt{ve=)1BWif#RXq~9JK`?({kaJ5`B=n1HH38c zDuF|-3UnWM@GWBW^nh;CY|dYVMy=J%(h9z4KYaB#B`M3905!JH*Oq%dZR-B&&!O`o z1$ZP6N}5sy16VKx*DdyVRza?Cq8RFwk!_w%L?cEKbP^JbK?WuC&T>u_FcA6$RY3Kuj^=zTE zLz<*8CD&`Zv}tLO;MFzlYC!a$wW_RcjWMrePM^Gk8vF%;JCT&HwrkEed+9 z`zIiLa(WKB0!ca)rTEwXw~7Dj4^zA2WV>sSgb9IKLRF>RFu6rgHy;20ZU65#@OHq# zsjZ2FD1Hwp|7$G&{nmfpAAua?>o5k6J~oH_ zLV^nDxUR;^d+N7ike+vPMOH8vBK|Md_CNoC2p$w$B?|+6gnaE)exUFXPwr4shM3HY zmNXzeD{#>`EF(t_x`ba}+X-T=0i}{&S1ISSvN`AdaZQNJKIfycy3wPSm%=_4d|vR| zyg_VsW8nVq3h+UnJy4*AL8g-pwd@_sYfg!u$;YBS(1B|tOz zXXAs&OeGK>Jw3$V1x(FadTEbMme(=T$y#dnC90QZlq^6$H`}Nnyzd~E)ft5v<8sJP zcI+XHtakZ2zx1dx-b_Q&dEF}w+^v|U-_8Qj1sY@1vYtvE2&-cG0F!Hj0cOJ>D$Uu} zBt*T!7BUL^M3fTuN3E8fOgFNglc00pRDuc_29TOvUE`hW%B+=iDv>`NA`?scV~=s9 zJ%CxMLla@0daUFD@YL?9#O}NDqQ3jaFR&DPuqs>!2wiS>8qI3~Bd+6oV4rhDmVcT~ z7f_Rz3j&sSC=DKWw5iMko2z$v-T(CWWFSPuIGlm`UPr?=T%Vve;i#Wl<7Nyvg|fS&wOF& zEqHPM-jABsSW|OD&2I`S`whBaEr#mhp~mXgc-%mi?|aXM`UYhN(Q*tAf;I3WOJ>l! 
z?%1a~lO%By+DOqKO+w_SQhMt@w?6g<&_ez*WI+{h*Tv+rJ6C$-M~=jv$MnqYlt8A& ztx|iRE>^?3MM#|Oem4f;vmA3mp!W*6(G#bpyyuq3OA(O1$cU2&Y|EGK>oizWK4`Kd zl;kQmkhJx6e=m5;VQQ0rL*)nh#+(=M)pA2EO62~7>96J!*6YxBduW=8`U=|*G&@8v ze+qQr@M`VKH@E1=J`;fxiv{Mx$^27^b0#LfV-kxp@sA9yx~n+_@~PQEKi3uq7T8U1 z)C!SMoSVkEVvz-bFAF@5&A4^jSn=q@OwN)q?u#E%$MdC9HuX<$S3Nv-1kQrAAhkI; zwZqRwc*bg9M*=w^!m-RQWrkO490y|}?S9Xxvtx`J*AYGmj^LqSw{iaPm3f@NQ|Y%M z4*QDi>FSo@98c%oAw_Ri0eJP>`@5%lR3H^#GmeEc!_By`EM8d|e5#=j0sF|6Hdj>W zVgahtB>2&G6<{+@WuFCLmo(0GMiQc?53W1PN&wWG$$0wG7#j;t1;Zi=)0mk67l;ov z2W0Gt$JY;$eJa4jOFc0Biw&VV`vcKLH6<6=<_mR~alIH^r}0RBA@b&Y3^9ZrsZQz4 zr=A8J1TV_CQ2IK(YQWdteoJ3i&!f<8Nu_OFgdASh+;?VWI&1_ZE`5|cE6pqUF^8VL z35rY8#I9A>XBf&c>4EXx`Fze;TfQ6I#}>TA_t|y`QQF}FA@{FqvEo6c63;SAQrqAb zovVWj%iz+5T5W~Cox(6|*URfqO(%mu43{g1uHdon+11&H7r%(6_jz%Z9&tEIjNNeG zLUXGwIwf(s7l07re8(qu(CAn~rz{fG)?gGf4Dpkena~F$p&$|@e^@}I{^x;ML_v3+ znEA~E9H#=0+(^qI&J2rzU@vAIk$gF){ZVFm$@FH&^h+=%N?24_xqgjcmB#^k<>o`Z znMjO+07R@ZJ1K?lUf}lY!?Y&0XgGa3jM>Nt$X2Thl5n;#+OXTKe<(qGTh~Vuc;1ce zq9z$3tG}zlw!tfMx@Vf#MWgb;-4vy&C~CWb1M>&8%T+s=@KTjyXn7S1E8|~5qC)FD z#mDQubi=esEl5Ol{Co+H7arx8e$;LK`tREb3PgyIEK&3%s7VP_(P%>GjO{t@o$tz` zE|EH{7i>$QbbPBr)vFdeodZeS4LgfUOLjdU_Oq&j zev4{?=2P8k$jW*IE#Yri;KJJyZ3$esN3G+U?lfgvas1IJy6Yp4t5_t`5i(TQT;pDDCWwFmmB%flz3U7A@$)uCT-mDnr@`3@ z>S+pN%*Sp_GVf+}7sd@@1}dfSX7(qzVJLz)|0;Bjuq2|8DpVorxrfJ~pUg^|23%rd zNt4fqa;I!zyv~kg82m1td8~=36EV6$b&%`|pf=Pq(F7hTPXL+HTqqjaTXmzVreVA8KyxWbKMn)g5 za+-oaV*ZdBHQ}Rrdn#Tal&m_oi?X}f#h?y4oi?nwZ8@qjif#N9ss^+B&t)G56$^m` zul`f>Bjhh7ucp+B{=>*AwLa4|++K%7ipLzY2DDCD_^-+68i+hU0zrzWjBjh@ZX;_} zZZf7a`FvR)v--(=h>Vp0Tw6jwhhvW)>)va5~mGWHr!ySt?nt@eV;+3eIICBwhL3p zN@ecp`$EIf`8bPZrOsM)Lg7G&Xh9klbBUG~i;@%=DNQ3r38tF2aFojWh>zACApQ7W zA)+d+)Pdd36?*S_K(k}7-x91?B~ux2BNKY>EAHw zQoUl(eM_H|E{{`Ew6x5@qxat*l^%XwsBYkV9qO`3z*%ipI9mf(-Cw1Ymn~PQiNS{@ zIREnl8y%g+_Rj<8&cLf0d%AErcP5Lx3)q+27Mo16I`n5KFg9|P^D8ph;Co`uz-bzF z7a$Rvyx|+{(dsDfKCU7)u=1$YRc@21%SBh*itgB1W+0u0p*UQaFd+Nm4V9_w4$8~+ z0>Vv+f1Oy)X(IaZ>ndh9el`h7P+t_y{uGM0?%1NXSA|-i;}j-8M%V=DoSYo#`yb=r 
zu#Lf@g;4op*NVT^TQu~9axy)c{pbtIWM84*9k;AwIuYYdrmvJ74(iOXGUx&zvQoXX z6+_`LFFut}n~bNVft3Fdh&ybG>1b$G_wwzRg}EK02AI5s?dvD-~XRcw)r_YD_f zwX(D*oA(c2LJkWE`BT$t!)s0CNY5u?6TbOAbR)&yZZeVGpyj7mYxnti{g9bU`p~lO+a=E1(_@h`buA5_Qnr*nCJZY?M`| z%Z%7y=TG%sdKb?)kUZDU+}r`gE=0&b#CW>OHTTCL5Ugf6ybY2cgx=U=pA&Z5-(IwP z6(i;rx!1kyl*K*SH(C52g>xF;kau?(>DdyQqDKwH+3iIX;!PcD_z1 zsemd^X%%-=*@r`L=)GDT8=|4x*{nmyJm)G!!-neW+JFEQC7J}do;{1tyGR%lC;JS~ zvUs;CJ`_2(wW;WW33K;s==XEzMAeH#Uydgs3ylkcKnO0$PT$=twN=Bf;s>J_J{~E` zZg*0-vP`f|$G!W~4h5#7!-VYEv5*V}-*gu93IB)Ft>n&}ST%T)=cTeGvM!t;o#>aO z-g&=FMqM2eGN|f_wlP+W(LTgFdn7E#Xdh?|i>7;7$vt}=2^zLHJ4tblf zvmq%D*SnH(_~7Z@%rWECXgy(ymb(+Hz8i+Y!_`zBi;CG@B^HRcUdt;rbm`Pj@6$J<( z-VcAp8_eyGLsPlvu?2iOz@$8Ny;LxgGgi)TF0eihn#=r#AP$>RLrs#)6Bl3Rxgn<1ahHTP+NQi&}>_&%n)XNM{hL{O$o5`WW$M#Hbt?pvKY?O7T z`BOokxXERm+H}s#)gN3|c8EyuAnY*)9?b%3GZPc*63fm;*W2rGUJG3sI}ii*hJCx% zc!lU2>oWGIEafLCrMyL>>jjL=lShsNx&9a*%Y*_|;AUd`cw3Wv$64^P0W%g$? z7k#iv&6Wct`t(PlEcmW*+`!awRX+G_v(wf)<@VX`RU_}0-W`JO^p`A^y_^3E`5hdW z8M=JjjUH8{TgIy3y-`&~Kg}?_zs33ZM$54Kkw6C~17~cW_pP0%ir%n;tw|1drDu^6 zgnTb}G!7_Zl?Uo){Vn`n8u3{+u7tcwwYb?eLV4gIi?)E^r|&HEg;1^ar7t%(cZn~S zZv+r99(2!aR-ck5BdaxDP?&3y3XV;jks5DzO~@MhJMsXv$)5eKGRJA# ztqWB(hB5=2Q8g4;#;vRR3tAbow~0N=(B0;cf1|d=<@uxaRnZ8I`q)n^$?Xc`eXYrY z*6qbS7=O+{#>RYl^tMv-S7MsOtQ0d#Br3G)e0qB7Xat|9D5MMYEM-(L_f8_)-p~5I zC4j6I-q3*v<*f2gg6(;qq zJ_66pb}6-t$Rc85!UhX8*NplEfR`a3$TxQM;uPqR2U41tjJ>(HI{A{MQ_DbinO9`x zGBbrg7%FyhwgoZy0B8;DJz$R2J(cq9<>~q2{LNpzJy&q9>k`i?fyHK7tOPpTBhvz- zAk)RmUhDPRdDS-U#cynU2#WM9GrmmkwyKq{H+j_1MegOh#mnY`ICg>JZLqhX%=($S zjOz%(kP9yUg;~^mm>W&ULI_G(jff}V1CC7IiQ%0aBXi!=g|GJLZJ2&EpH7cZ)-hu6 znw{|>{y)DSOZqB-@d?kr#Xe+il2xDoI!+w}(|7y*)RSmWmzxLZ+ux58E*FX>;ekkI z#*AIvF;ZWZKq7cKdr@ETT!EBeWYr|?a{Zt2`2yh*Dd>zGEpTBM7snNRH^~qdC?_}J z;z?|?OcS`DlD||Ot~y96Gfaf6vMSE1A+3Jp7PS-8Vr57da!N#t(o3G()$6Jni{c5& z5W@PDHXEA-Mg|h4Q-rAW+^3-A({@cM%~w5TN)sS-e<1;Yj;et#om^3Tr@)OjTJ z(d|)0yv^cMlpgDp$e1kut4%B~SBO3w*(t72ZpQ;`RN0=r5Aa=}*-K0Ge27=7&-f9O 
zrLmS5tZY93vzO+EfMa|QD%8SZZ5j!vB(lrVU))?bR|QQMgG}Nkrt-FcMGor2SF)i-n#TA2;MvC|ti$52W}QvcD*#ARuVIKfn0T^oPI!8LFT?AVIR zLYuz_d==Jza5M)5|E;@A{sVOzZ3cs*wu379&4miQ0fq#D`9q_!deI%yNfKcXP&?4g zX+#IYbv~sL5lOXUh}ut3D@>Iv@rRJD{UKR&?C)6*V1kW>CeZLF+3ph!n%I4_%{CK-CWZkR5?^Mn3Nyi{dBz*q+(U?-E-S?x(gU+M0anm2y@%-VF#=q` zd{llFcs}KNvztFO;fAl%0P_v=HOSra8h!61D@50F7Ug{IV5BD3kUh!CCea*0zH5L;~eIV zn-2mIuD31#j0abB@{PGzg0r+XBF&PS8ZieP>trrfKnJmym*P<8%qYhCZcb2?0 zXI-C$)LPf8DtZw_y;iEnUg4Q=AWI9?O3Sg$qAYy|c_tBwZ@JYqsyy3gvYk94c)LtZ zTzjb7-{K9Hc`O$&0ZNQwakjQpqOs@v>d=>>f7)h%%b#&Lf<4sU2fj+`-*hVRJE^PW z_ji!N`t{Uo2y|)`8sGi^(1B=mKf|f4aF=U?h0QjO2U2l%im@P#{doc?vbY{+gt8gz6c|k->=@_jeCgY%6&D=J4aCExEf_ zZ!DbRR_ni70Q(8Bl-K|nhl%J!Mq;jL>+T*xXBQWxHZ#}Y&z6qhp5yk`&^99RlRZ|s zw$Z6`YdD691IkrOv1G1o%-2-@1#gChPalOeB?>J%HuQUwBkGN7`9QUX4C#?vY7g%$ zXuK0}bC+JsX}eg0;rPoFYQzMV;bX+(b;{+fAK-l|D*9g*->0*we?dWk$UP1KlN|fT5BZ!DNihNbL9r9} z1vTidbh8Y5(E0XTm{dsBFiXt!8li9ZHzFXJ-8ha?#JgDRa1}#%`Ku16A@C`IL)3WB z6NCxV`V}i#GcVK7Fl8gvNIAY)&Y<)b)o=K@_t-OPGqWQE3F1x&_+iN2N{k=OVfSDe zKa~ZHLS`PvrJj{BZ1b}Mnq_nJkj=+@f&_!5r6Z5RimbJP(AR6&;u7)qQg@rCbDl+h>CRVOZO7hxiT*&`G zlz{H!a2v;#=2`boMqKx-8OBsx&QgPl+by=M@I&;r@uP&#-eO7Wxy$lQyaTN z!qtC9eNPmJXCF@+@#oyOTnwvSX%#Cwz*2Ol?kO%Cnl#)kRj`ee$!$ou^Err7(=%OFr`AYfkK)nrh3geQlSt(QQ@=z= z4?E<+RW{RdUoJKyKeby)A&34ASy`C&?J+ppP4)Yc@3lXTscHN{%p|>uehy_`r2!%j zLIL3FaDs(@Nbb|sz4H|YPGUZF3UHYA+8x%9=ZplVglfX{-~>sZ3IPOH?dOq}HI5HYkmQpj(Vfn~r1>p$};5}Nxpn&2)Rsdw5A3vO{6`{&~)+?u<| zEz|q`zS_;!eNW$RYAqp@(G;gKXHQU>o=94YWKlkN0pebL$ZUl?`Sx|Ivu-`Q&fyUx z4X)^F$^G=IhE+oqd3H(hr5q$QOz=(O(~$<>KsNz z3I?nF4@gvWs8x5}sL-OR)J&#P<$obl z)Vbg8JSJI)Pg^-pA>ALhul}Z1L+pHFK49 zgDt;4a*nR?r!KU;G?Df1y6;#(PI;Cm8uY9s@@vpyTb z2e=K4;H9&-E&JejVBaN4s$L620YIt@t&7WFZf|4oqHHFc8!J9l{#UB=wnuG6*~#RA zDm`=%4I)-y8=HIWbnkE2M}KPfGI+V(*#kY=Sldg^QB5b-m?d}7{kZH3TkE*{gyV!zkdd4-t-|w;^JXF>*U{FW90EsR z192p(42?gZV+keXoj&V`QwrkOHhc#9GT5(ToV#r;!dljJo<`}`id1T zeqsOkT@lofW4*<2(YBA0dzk5#*quy|)Zc^k0(s-}1>DiEc4gPSs2ZzN)e=&Pc~f7! 
zRGzoph>1B)BE_G!aQmP<%5Ktey9u_#p>eTk7P?0r99tCz_$JyD5iGeZ;*}rUU?x)s zDdi7WU3SCB8@9*yzu9LHf8E_E9u%|##)Zq(6~Pn7P4R7s`6SD z`^pe~lcwSOHP+Cf_sONgg;!aev(ib7{YHvgPe>g-47ItrhljS(xkCd?tL36iLtSdz zW4ljrU$w+LODX7xWU3+M)&lk*CZ5qdg}2%}zIV-olbnP2m7!BJV?5*gF}KS}mRd+6 z&(L~FL7gEB$}Iw_IV8CFAv|-|)k|ASIBP_wbg5>M*<*d>7S;WFd;QK{YVB<5dt68b z@L0<`r{U5DWw4v&8RNMjfoBW}P`I}1#8ntw$;n7-s|;4WnSjDIvM@eMxa&1uUatT~(n&cbOUA92&%L!lc~Ky4Od<`)Oyh!SU&p})gh zG7pT?RtLRK@q^vp6^>j3W&TSf7qg5UHDjWW_p)@Jf-!|dy{`C5EUKkwbx}_&s6igd z|MiCf3;6B^yBGFj8ovD}I6MvfSArs3?O z%FP)Xz_D5%19I}Ymc{(zjo4IgJAMS40Oi)zJ8{TZVc=x2g$QH#*<954ztA4rKTa=X zaQ!NV&K+mCdYY?4LFxPcVHNj(1YkDWkt41KA)Kr%{8W_ul zbrYXRAfVN$|bbVuVq}$eRY_nrqopfxoJ9b4Kn;qM>ZQD*dwrzK8 z`_|t3oO{Q2zjMZT>qpgiXRS3CCZ9D|8DYygyU=F$7D}uC=Pk;W?b7&*WSjRtUb*9dpB;~Z2jZD^alDFX8@xB>&#SS? zI7)J}3{)Czz%0V=P`;tQx6GZwvk`&W-i@){xa`eS(Sxk(=6?06A&OlX%IQfMgiBVK z3tZ=KytjKYLHES>R8JAcJ+-X`Z?JPIxifj(KhtH?{ZS7iN%$XB$ zJ5ECy4s80?RrYa+-JRmhM&|I%X?j-B&Eljx;TUf-IGV#7c0Xnz zDvb0cPLAIl>hM903M)c^k-(#BnTD)cMR(BDZ9|tz04AEjL;StNMOF(iT(|Q%xB+Hz z&y|`eG|^I&(eRGaVtzC*El|v{&hcqia+6jOgU%Y8MWsltJO63Iuu>FqVeLC*F|cu ztOuIalTlg|b=&eC;R@~ibT3e&e3)O$#=d^Z7Ldlt6hK2Vrj^NKx5r24#D&d)B=P{% zfv6Iy!>7|VnPIte9d+mc3Q-crO7v7^FSw)_&Zb4+s6oGc4k}ls?sI$|%mFvj$8!wxtPE_)KzI4`?`z-|A0_Ii;@I1z%)O+B0J zk$ZTAz!p@Zz@2BnBVK0bsZ4o|dSeWgq$9tKU6WX!D(GtIt+A_|%uB`8)kVED*@FKS zKjDRFA>!g)Z&e14$11e%?g@cU5@(FrUg5&p5axmES6>kspCXT}#~rqsW-o!a?!8Ne zNnr4KJ*BOaHpy`|+=(Gx#@q=uza1f%BonH?QCDtgO+t#`sfq^&Yt$^haA z|4i>T{4c)#)dVLPjLV;`V$%f_h~XAJ}N&xqvs1^D7M@^I8n_hN?*2V=@V zX!8Ha84|LusuGOC{X&|49fsn;xo)mv$el2)B%hoD!QLmex?7fqi{@}4%fIIQ{~w=i zQ{d5Sa9byCayBcKz!)8atrP5o-*H0_jX$f684QV75b;eq3$y=@O9R_)fC8lmadXC` zm?5(2eJyKB1f{n&b$gb=R7U?*;JD-U`D!O^ybs!^gNT@fI$X+&UK6P=my zg%C3mdr;AZgjw-anOP8f)t01`C|yI8uf>{=s8Wu-%3kcK`QF$B(f?LUY>Y{Xo;yH? 
ziAN&;;;%nKK#Y6NvOw43-Dr8~*+%{cNs2~dvmIi8RtzPbah1O^f?r7m&#Yrh68CEPA4lyRTI?-DK=i zxMKC3(kQI;`xigFO$eFGdeUQRXXPZVH=1RM4KV%7($WJw3GzR3X+Q(LeQ8 z(DSU0s!3zX!|MW!X3;8}46GqfL(=n8(ar)FyCAxVKH(O7)KVwK4_wsVB_=s{m`B*( zpyr;RHn!W;dfKu)GyXYE{r*mK`2^w@fzLXAsX4<@r#VD$fdh zq??@Sj!Y=7hiztPx#-p-@)lSY7u$d(k3-kGr+-rWK3?OZeCxb!MIht~DsSX~}v_vmM}%tm=! zR3_ncFmdVU{k$cJ>w+^VpGeJB7d3oS=?!p->I5_(cJUVC*L6KsKLwwB@zA(yGiT{t z0bAo&nnBYwJ$Z(+dPR5>=&aCu%?!t>%;kLMh}woc%eALY-O}z}`)# z4lgIdRa6H5^X%i?!RT1F9di=T8lYaL*$7!+J>t&irN1ubnNv z(b6%Hb+vakV#5c;KJelFMpe8AA=@o`vC*@r?vm?!-!?Q9?no%+VsCpA7fP3HDoCdZ ztibi1A}6u^pMZn+v5`DP`VG17G!iRZcyH++!N-&a+?LQRs5LsP#P|mg;rt3_)B4Bh zF9Wmz;51XS#SKDwEx%2*OEPmKZTusl;QFfNG_rq>nvm4oG1nl37tB>9Pr^bgj%7o= zhRT&j)p|?1_VB`AsQHZ%Vq5i9hbu!Hed$nvsmQMC+~$e;R2qG8G2Y7Am1)#FcP1(7 zvykm6LBFB-z&{&W2aAgTB;Zfp*WJQSo+C!#%0`hzjYU;;DtQua{W7b-&f(#ag-iSK zJ?7>rur?L}twPxv43J&_+jR$9${a1KDxF!Q2U||z7j^)dJl4C(@Etw~iE~Pv>ob@_ zjk6?mxDFOLQ6?$mVL_%L!C)2Ugjp6cR7Jko5qmx}yx1PGqRJf$X&EeXHhp~6B;lcCV@pr4Xa zmQQsN<$j#cgQIoEj9V;nDB>QQToLvO$!y@|=b_R(WF#{sT0!oi*&`$|s$R8GbwEXm zwN>jNDi+5^<~QzbUxJB#a zpZH@t*B+ zCzCn)uPW_n4xa_P>m8?EFxBq6CJH@X`HT}@WCsokt%T;feOr~Rey7Q|DsPqFpYVuQ zn}w}h+`soaM2fXD=|U*>^bgLg+BtBVyqmMZxJtcbzn5c90fxvsmhiC8y38>pGtnP2 z?N{XFKgD-haK}bEjKLqK{P83#hNOSM&Ueltd*hN`(!nYPlGS^4f+os^9!y`F_TA+Y zyGzE#)52_j+>5K6aDKgJ6UruToQpQQYsdfen7(uy#2hn}aheL4V(p}w?_$$IJ&~5@ z5_?8%Rn6-f0OtZ(e4Ekh-gg(PGokFDtm~HT`H%>#ON@J9^sNgB0h3qzMRVT{Dxm1| z-peFZw~WR?_{{F%LK)!X;otq^jxg##Vh~#JM!u@UhHE<*)i&eZS|2BNxh)lYY_utw zT&8;NYxX9gxp50@&#uNc=Yvy?+=qlT7vk0i1MvDS1~VcQR&i8WQqqCF-)Y!0?;|4i zgV)iNh86Hsx^0eI8)Q5~Z-jIt`l{yXQAz>Ri^x}<*qF0@HuLr5>$5GtgQZOX-M8`} zjp2sV)-K_28mS30BVKgBa#>w z(DQZ3;j2r{FH`O|XHN62j=_skYnzDTcTJp8{>nSKg_o#WVS%|PkDPoM#VIzj1^K6J z;IULQMTJ^~gUbM577W9%&c{%^2&A7;N57P|Ra)G*y+Ny7Hiyp#6e?uX07{s~%%w-xa& zA|>OHvi+axML$XZ4+x>sO{gpm33eKAECHyhF|Xg%Pv~fpLWO9EKAQp}@#yNWVKj&; z*%#mpFeN@9OAQBQ6ralh=Xuq-h}mYk<6>LB2X0+0R;CY#+k-u238GSgs?gi-7fuY> z9cN5-&-XB&Z|)E>OM7wMUoSG?3lQ?|YR_jW0-8Sxy`e0!>*Kq=j`A4KVQW`T^yEbw 
ze6z42{VD$Bh*RNTM3IP2|Ewuy6hj%jnnM4?(Sf)a+bzTu3QcwACpStw%9LC!LkR^o zpzBxpi{DF%m-#1d+Xb+J_=5fKSo2|y`lgpG3z{x&y{A>%@bonP)FmV~EX+>^of`5p zy^t(u>0zUZ0A*z;3i>{J<;pdRfU+i<#Rx{&%Xg5EFFP`pCngCeC*=?$y1p!M6T6X~ zFL9|+z*|-WcrnA+BtEswL#CY|R)VcqvhtVXBct|xIBHR(i|_X$SxkeEM;0C66yvY~ ze7>~<=HVlAw0Yo-M^EC(4LYboPrb$thZLQJ1u5WfR)t$*%0l)|5F-m*W@tSNkPzkY z^Liq)tiSwpKJK?RMPVzs0!a(aq;n;^ixpL?3m(f)POALci{b8h&^L?SRush0a zm%b1-v2x%heoWz;`&dfSNxmAlIF`oPj;mEpj5<;i{N;VozI?jpveQP|I`w3K5L{8+ z{&*MM<})l^rQw6Z*GNI|JYXiKM0AN%ljuD`mE;|y0O^SAn~k0# zS4`+5%f=kylTI?JdYzjeTd`a(y8^q1wY)4xf>N4dl;ISZG90;0dwo19_bvGVAgh2q z_F;!Fch%uIkol);7@LfWXgewGM}gxaT+TvpWvbL@uhJwvKC=PpkNaR{RGBzu9c~G5 zWwL%9GP2d>9y^gb;@8eHbFrs2HA>Nuu zoz=xtIjBE_3W&QTRv#E3rv-L#OnP~G2)4>f5N0owS|c`xcGpB{8j*d0=M|Mn2&i9_ zNr2Y`QyeJPZ3C|5CRo@~{k{d=MFq0cZmN)YrZnLKIQWr5Y z^&?(NC_$`7Mx96DC!+BuE>g6e4VK@EfQ{3trBMz0z!L>fix#dL(AEi zjn(EyF6Xzsh}>g8PEZ2<)ZQ9zMi12?lO+!p{z1Je@k!|2^_A&ep}U|F5i{&mN97wO zA1S#aL|oqH?ZcOPk6Zqb)XwSAEl?bjE?hNhfM`QzpnO`R)g*Q~tC;Yhm~QFfW++44 zux0np!B83-j86QeLI;x6XD{1V%Z1yYyDWEaFN@FNu4x(-UY46scGCWbol?J`*>TpB zJrGBVKv@sNZOWJ`YVGxT>IX4@>2`fR7UZolNj>v)_86bo;OzFwxEa%h^dfQRtBsSG zse@OLlzDfqax$l-z;7$b>rki#i zVgpk>&-nCkco_mQ66&8lRdince8yD8Z{l-?iL9>|zSgnElE{ZnreSvJWE`Z=^60YlI2trt`;PXQqW07=ko@LF&*A&tCy2JE8s6A6hJ z@hO7$NOK*VH@Q7heWYNo))PgYN2mEYvj)noq8v^>lbJn^*Le|<37n>5Z^W^D)rQdA z00e%^%+DJiY5O{@ylBE5L0wccrgeVYicnLW#r*_sk3WfN=Q$dxxR0^{s8aEK1*}pp z6~SYa1nm-#Q(hjyVF)b<8ao)P!O0OG_%Re}zmDsanY>Ur8Y%FJO3_CejSK+!j`O!{ zn~J)ac(@0(h6NN7uPyp8EF;sW#5v|dTO6|#%QG*oN?s0eDPH{^evim_zMPOEcN|<= zRGmboc|oRZs7R4Fb2(`jc#0oIcAsA!>nX*P3vFs4l+z0CwM>=VN~dOS#hV&(*?H`3 z-)8z9!iew_+q5|s?E=z`lf>+W5p!~O7k9^DKN-ao2du90f7V#~`;D&HQkBr)=G~Ku zLDPIqKLMg=Gp2PKjOU8=Kn^Nsg#ln^T>b~Q8GP>es;IVXhzzE}C~ z7dxwhu27?ydD+X|PYER&g{VoYw!-6(N%T+BW${nOS|7=Z>yK|YMYafGFiYW1HQGTj+u`?k8Yxw*wA=8~!0@c7;m1ByH+>(@6*GQV)8Wpq zIhs|V#&^Kp2xd)2MhKtcXZZ~mlLj7()KC zaOtb0pO7)pPE3m0U&E8ao0i!nOa}PhaCu{SA)34@Mo)FH3c3yr@+1}a(n7bHvKZRM z-=xvpGh?-2vS$9I*|-rUn#WVPu^){EsAB#|f|)my;-d`bq?Vxx<_3I5U1HFAl6(i2 
zCB=K7X`1X4n}+t1Dwx@(Qr92{0Huh47OEkd8Ie`#H)m&?*+aO{;@5SUG=y=v5Jw$W zb)H}Ez6zNCDLonzBXS_yPDKPD=m_5lUGy8rf}AjLC@%mr{yLdtZs7Qhzw+dcL?U6%?ZClm~tO z5ZW>@T@bYr#~n z5H7JRd`kJRr~GryVMNDqSF1ph#{Xx@31D{S-yvjMLsY%M=~zd;7WV%bnFhXU>Ht=O zV(!qd_5Y~;zY-Qv^uRZ`>#Crag8o})tYm@dbE~Kn_-4T;73?bZL5 zg2B*L{eZ%;DCrONTWkbCGtx&-|ZK*p4W*V(_i<WA_E*KEMIw**j)|V?%mg8OtE`4|FWBwPA(9>aKgk>6Kzn(%WU7IukdN{jT^=H zml?%JkqB0=qSKFnHWjdYxXo-2|0WFLLxca9{1E*17sIK)Il2CtuWeJ(?Q~}jh-5y- zm+n@big#zi`A?g$UBJ6vo*P}1#{r706r7bS5tw_@NN{+oR@Wr8Cc*;nT(!MK={k&W zW3>-H9R8?&I=u{yu1$f$i4*SY#76D|TL&}P$wwl^i?J-`ejS^{N9W4AxV_uI#}CLb zzAps5gkENg^gql`OGDsy%3p1jpm$XQN1`%fq$-V`6;^und7~?FRY#_`wG)x zu9)kaax3b+pMq3f_?^EnfMA)FkI1UU5W7bpQ`NBndcgNMwQ->1WlL#%+m?dzU@$Z4 zhzb;%@grIm$HJA4=T9nU+{IxV)tj8}$LvciD@DP8{-~*U(i=1JJvTUgV6p%FJ!gS+ zAlVg`m0dcQ0WWA{(HAg)092uMbD6Rg7V$p#X$)Nt``PMKKYri;-2)A9z-eoQ6UXZP z>nBbYSoNSSn4~aLu7*FlXerR=!0d-i7fQ#|;TS>H^3ZzWYOof`>~EfpoEM9R0zW>U z4f+Y`V%i?tm!OTjIt!$b&SG{tZ_Ut9k#{d(NH#1itHeuWiVmVIKNfg@&igOQ3ZUUn z9WZSYeGa1lGjd=7YN(qKnRD#W;Ku$)1aR`5SI@*)ZN@*E>b{#IUa1#6ck8e`cEnwV z)**$xu&%&2XWS=as_tCan)eR4CaV+s>XX%FY4yOFGVGPG{kIkrYKuKTnn|7*{mu6>x2FeA-Pac;i`Dbm2!ImwB?{Rwpwcc~x0~Z*0%0%z zT6NqIPZCXzoOKv~t<2x3KLkfF7?z{G!Ro5EXkp^WerhL{Ri@n;gjd`ulZ&6|~ruJYhvMw&&%BQ*5zk^r8B_nii^zjRN_HN?bBO)?quPdb=8( zn&bY+L~R!LavQ~H^;21#PQ2*x8jI{*@<4|GOj89GHkGjx^y5Ii{=QVJzpeCMD(!Ne z{@Qme?er~lyiGzf7XfZ~3^|RnXRIZ^Z-lsDv`{UW-QvF`W-_w;J1@hxoQ6W^MXY-P`=7$GDGO{<~UpcK6K-|l$%J#KyOnlz<@~p&(DD&2H8b=m$ zgMTf1E<>0HPB%^2mL(;>J^GvpmD9{X#gU=s%zar{eT^{wW}2qBi{YE5sF;AW1Q)V5yHx#F2gDs)?UAVYfd9IEalOrt@9h z1JH;i9=fKO0)I!fGJA&21071PJ895tKFnz*%$Inuo?VdIJw0HtHNmZ)$8rXI!s(L> z`u|!Lw*6>8^@Zwg)}sIS$SqP4uhvKOsMpxTFW-v$oEMn#iP z*7G_736}9DcL#8=_$~&a+-s=)7JBsjhl5xx82U(f!%2h-CB9__R0CqTv0X*RxC^Ap zI&xBfMg>5u-lY>5)ahrhmuRQ6`4D=vkZFKdyF&mFX+7cns5Pb&so1+v+kf!{qwRxG z$Z;E+WWad}9XAe@+#=U@185-RhYaI7p+Ty^w8<9-Z(bP8Oz zvWH+U2#(*YYD2OfjI}hU0Scy38o#1Dy!Zzu2<;zg)%X-1j(|Lw@El))AQofN42`Z(w!PS2%u%B zyC=#jgma`A^c0Eet5o`60JKM8l8vf@*16|@yPm@{IW+jc;y))6s8UNJf7hx1z3*aW 
zeY$$_RGCWl?Xn(7)q}T6Ub@T&2C&f0?Rw-qFgUbAvAR6gn@{*dYMue0U z&MlGzn(z<6d%fBeTzwa&z>!#lsCkD>vvm%P#xRQG6&|?OZ#xg&btzE12iW71Pr@k8Y`ycF=mPQTn+wr zoO}~=T9Y@|hAb9nnTy}X3riWv4+l4n;t4)T1=n4yRsMEK%?rr#)shjWZpIE+R~CQR%qMR#mqeA{0;IA1wC+%!!uZv4 znnde|^M1wE=O49rlK0Do-L|d$c>8xja3U9OgB-AA#K;D7xYb7wU2a zZ~)DreklS-0gMKirMIRi8`_SiU<{#$;dCSca~mEh?5~S(LiZ~ECX+S`(@BpraFR18 z(ov1yCbMCitoIswN{YsZ2~5j0T$a!fUa|A?lQdvTQEpVnuuNoUg{lwZ;e|?9A)4lj z$C~`?&@>baRH0xw6OChN9SM;2KU@}8t^BAR@dLb>CnhY&D5yBuxzQerKw12-piOVBtardQ-kLyF^MgT z4qHeU9X>A3wwY071b}BnPeQS&6OAzWIE)JXb{n^1p0aWXAj+~>e>34yNKY7zM0q8j37Lu9s57qkn;u>=Ez$dS-3wr#>>(Y zk~NQO&&Vn!?*~aA`p3$We+q~2fYA3}G1i8r4gRWlS;(^fddGPwLPfFC_9c3b{+w5E zVJ!O^+;>snHYAD7vM7nxOsB3ECG6xFxGo zcQlL$aOV7Y!LC?fq+Pi5Lf1xlAOlnxc@%rDN)*RWjL6@45i@9unjsPm!ts){bjMri zci@LJJzo*|C|p>xFk*#^ILw(2IrvK`8qXahVCOiml8M&)`(xDdJtt&k6`F2zK-?ag z(cGQj=b1&}cC?)7M=pkLBrR(KT*GV%O+WQAy=&iZ$JP3smLx52Wp=jlE-6GzZ)0QH zna+@tZ#T)1iwe!7Rq?iIxU@p(Pz_D6jNWj(@I><%Dlj7D$XpB(&}Z^WZSogitzsdn zs_a(MJtH-rJdPn>7rQ85frdA!fR-I7Vm-M8I|xm6Tc+q7ReHzJuAwL?uq^0Fs%saD z3RLdJ-WkV?)Ez4sJdI>W!>IbOzmbzlCOQ#X0KW3%tYiOcLPih(jxq;pzW@EJ)?O0t zm^hgkbmf3YI1%LMO>v_W&LI|mMA!NxRS_W#&= z+qS6kUL_j3t`uDyj%G2v-MlzF%%ACksnMd-4TkSprj z{H1U(;+YZa z#J3Ld=cl;3M6QVU!ltJm3=SlxC3}D1n1~GirCfEaR2=ov1z4_qCB%j->q*{uLNzUPJd!35uNk}` z-o`%kqr%Si$}GgPh4l`dx7$pyBqLu#Ah?N zKHWo99S}i@_6s*t9@UVz?*@NpPg*zDw`4<&LF~+PSkdPOS1SH|`{qiyNmW;qAre?t z-XZg&!|*uV>pSj9jPqph%cEQ`zN)Quqh{?sA~V#z{ku*+L6;aZ$otP}Iw62B@A336 zmZ9l#VBkt=W-hB!uc9}omQ?ed70GvYAgEUgVG-v*8WO!n`>Vns)$_=+Q@^lyPbp=? zclUVT3Z3a2zY2P4m`rgx4eSZgp|K4Tm;;(B?^I=6CRC*1{|ui}ah5~HRQ!b{5Cl+zif z)n^%qSxnWTGzlJxt%DzgmkSDITAkV|O`ha&!U=wL&ap?Xuj;lLud%{P_jeH=fsei# zI0f9?W{aYvvduS4-cWb1UzIg5#!3U1N(QE|RXO*|`;69jUpfofdS*|RYmi#uY!m^@ zvCW$`{_8=D-NstjnoOJhMy234T%!K^@0gSl&mRXz zD12!BXZ#?XM(8I_Fr%9_)n73aZ|^^O2!!9oX!oKnFr z%F6afG?iSQ`X^%7_w|g?3Q(b*Ov1&x(UQNask7`Cx^IYbwvZEaDmJjF{CDRNrH$sp z;B80a0`dK@zbn$Wiij#2YP3iEenF%Bac$5=X?MVW_(GQzmO?AtDnfkgCr7dK+3ZZI zr>d+*DByTn*;${DIJ_DlCK!o8J9`0zrWMjIDnVsBA2E38kjiOnd_yr~M;R}QP9?n? 
zLb;1O50RJWM;a31S3&E}vGKlGmgaEcxS_W2?sMVwEBv9mYpUwJsvEHM=i_zV z!KG)Do&|^X8fy0v0LTj&P`-JFPi*r*OhrS=6wBO4Lj`jPR?kh$>h@{YR%zAOJ!@c= zHSa5LvLpzP9_FD>o+lv4*9RW`)iDyM+WJ$V(=O#O-r?Rnb9MYFuSI_slsP9Z)A0xC z@*rWQZQUMMU~Uhc@21U<#;9HSBvP02t@7jnLpo*iCWru+edFD_-7O+KbL_{Mmrd>0 z)4G`R$ZLl~9`TOmyMY?^PU(@u$qBBtW~KS7SvIF-tX&@yVi2&u9@`w)?c_;c z%CH+j5D39-V#*v+ei=+GA(nI<^zuMYLh)z??jOcuy0=C6ls5Kp`N!XkOm0{rdb|{A zO}fkmyGH=w9jqDj_4ZLSSGr`8^Pk!iq)dB@GN)WB%mAO`_KZzKJ(|6cAj$=PnU!_em9@Wkb2d628roU{((U=8P=*Z8@Ukog>7_Q0Pp?j@n% zvW?NV`rzebcal#9{@8ZKu2Yk5$^aEWsb!IPDn+0X3YqYQ_ml@;WL*F;l z>;oOOXuDbmjdQK4L;a%=3o!(HNQr0ues8t?fUT2D`HJa^dnU-2C_EGp(7&Fe?_fRN zXHLKyp70?4UJPSYzrlh?a%<9~$A{$HlOd<@GsEjH*?GqbzC2f z&z*KhO83vSN_9(Ty}AOa0MGsnElP}TSD)os4& z`cb1W#j^Eh0%6-@o(vKY&C-#W96zjqy#h!+;hiD#(vyEi@vLSd2h*aqIj5Wo#~~Zh zJ$qC1FTTMVF~Z>SjQU~Zf47dA4M3dI`Q1CsDP8R`?c`$ZydR<}H%}U}@5~vusRK#* z(Pgw&*fTjh@gd>xyTd+F^CMWWG1ph=&QmR#xmjmvBepeTCF%1MJ=|4Mefu>kqHX#s zbY=xzt&?ZPX&)qv56EB7FLu!P=$gqYm;?X6zvDwdkcY{4!#)R{X%=cS%)6jc$3~|S z3UD0;4+1c5dKtX>Ros~KrC(3&HLdY(b%r0lUNb4PIHX!3@x)_iO{{3tNOpS@u}$Js zvdCCFGoW?wZm!T@hsqsuYgfN!F+|LU+bA46e#(qEdz~8nJnZGWYg}ULBMfmKzn8(1=Axn%DtpZQIq~Z(_~!o|itLAHiRNVHZH< zbW$Gfq-%7-4ncH;VA;RFr&n+a-rcMu^RaNRKsMRz8XlECw$i0_h(#o?zcAl{a&UMS z-?pUQzW=mV^7i&ji_#gor8%;}ZR2v(;KLrj@dQVqb7&<7iDbg?N_DbLLekRNXTx^D zXR*`@jMheb6+(`6DkteY#$~>rO}wp@;i`!;=Nq~17@V>gIk1+h-qK{t=wf~oZ$s&# zHujRr&NpCEN!-%SCvEI*iirDo*=u(4kQ~AORHgVMYuIxs;+<&K#_kqH$)i}7zbXfV;4iPb)kaK&cjWL}E1 zmzzH#nRJboHh7k;-Q1vz*vF@e_sZjxyd=b@n4e9qjp0eWdXvyVdj>b(Yun$40rSjW9O=G7U$(b;uSqSNjd#&Sh2){+NWRvf-Vs z^Go?!+Rdf-+g$S4jx&PtUrECu-|jDm$HQI`-TTIZmWVh|leZd8j^SZ>R-})SNz&u3KpqwBqcb zJtoer6SXo|iVvv%5Pwcgj|{WGE4h(?Eo=GJ3Mco*+`X{jaN8>2p{lTJ3V5OWG*aUe zjr4xTlJKz@E;Ra--H|#gE_t?P(CJ8b%9^_e{NhSICptNG!rG&>S$Z0JIkAy$;xt;~ zosq7I8SvP9JFVkAVs26``D(hm=1N-uH^=-W3` z>CWoReVig>SFA39GSs4CpT}8qvFR~o3JcJrRPC3C4(kMvZ~sVq;8Gz& z`DH%sdY@zXw-WGW3iYup9R-%EW@ka{2uXAd)^V3l8i?R~ynAF4)GURNeMTw2o8Kz` z8tnJzER^`%wEWb>CY2UsGTEGYepyN_u@y#eiyno-7QjZ_cMpr6^}#9TqL89xOJ=lm 
z*rZ;VylEC-K55`BFm&=#&qYFR;!CoY(0?hvA{ZBFc+;59Cl%VvpGizG+Bk9OJZww{ zPN!iKN~Z1I4$vvTF|S*3j&lF%{{xmM(5qhHNZJaV^VY^pZXOpvMgY zkM3san5s(O$UeUz>9YR}ab7g9McT|7a8>w;^Pv_43`yuunBg1trc z=jV8FX0Y@*D`cAceDwlvr^GpTv%y;*c$Cua{?gvo-Ptn+*O{pOC=^!|jHwWRhgmnU(B3M#xEQBhF`YO-*TkMFKo z30b$K@a19=sMtE=Z=?2xor6PiJHu9eG~CCkifq`@C9Y_P2f5d3N&_ilk^AkR$A-yo ze&DDVSbR3R!~qY#e5 zsU)slkInx8b#lS0`jwyTn%eswP3|!4+yRg+TvF-yF!t_tXGa+*Uw?CdC5Z}XB;G#_ zeqy2FGa+QklEc~=!sMUL=2fftqQVt0x^n$hY?L*=?l`)PFM6IkVg15_2!LE$f|7Y- zgCvosXg*DYWTm^)HD&Emm_xk3^*|Zbir4M3c8={ZlNl-ZQHq2^h~rH`vGtKySRp*8j)KCaey2)T4HM(pu}cA2HyIm3Q(Zs(9($Zga*}OsV+nl8DAQ zfXCi1@im3PU%FU8`p@*`a3zkw4X(7UsUsH)5$`gI&@hvH6P(#)(s~vVu2lM@>+jLo%Zrb%gLjwV}9E#$&z`z!i#>1=;KqEJ#~wb26>NaQz|#j!Qt{6{cqD0 z1ftCc1R|HwCxZ4LOO-Tcvq`aO`$=^q5TKuQN19(2RPyMOPC!jp?-tfaDrb?+)phnK zcgVINuW#d=*fKY+d3J}jHdkrlu1w%i@iuQ*lX=Y&_(@*tM5th_RM!npUiEECGhlQE zZZ^BQP-=rGdiyg|COZ2YORVzVwv6fDnrPva&hV-g&FB|wEb=;IP{Vux*P24KaIV4y zcBT+VV9n|?9^X+$Ez#0}&wb4+7u3T2OnljnZWWf}A!Eb!3e_lo%wXo~x6O2M{ho=0 z)s|rv_efU#c$?zs-1vZUI()IuKUh>_N#a~WwDOKsU>`U*eDR9m)<&LmR0}Rx3_4(_ zYs)>27sVPnmAh0MX2pNBcNi}M0c5X?xvm1rvK7rnHDXBEMS*nH_++hpIc2CASuWw3 zuCH4vGDGt_CwtV4fG|#$1ZUBxS1yW0UJIpn?nISBtCz)&j!iD!BT+TZ1b2r3y!#53 zwT^~Yo@VDaiczJvjzCrO5rTvb&aK9v>!qGvn7|cbieFEd?0|l$yRV-XkKFyJHPR+s zd&}9p2IOi2)@$k5v}%HPigjX`00NB}{;jVfeI_x?PiuMXjvf)aondLZRglsx5tT`} z>ysfW=nO5JmQCx|nhYiz8nt=18!vNbx0(>c%MHO&ksZrL6Y$D6bI(m73E~BQnYE7` z2oF1$RVAunWLc?wlPnunVep?l3CL1NDh|G!hD+x$Cc1-PoLv}SoAk=m3pH-eTN6rf z%>Z;4Mx9ROzk+#ia<51vl=hF(X;>}YE^xg3{x*nx1hG+3ykRE1f1l*xDj@!2M=hc1 zb+b7}XId-G9R~Hx)b!(0_Vz*~A{WU==U!mZ=kI=1C7=cO;$B@BrXl z-mee!PRD#ylExQ*#qa-v4pBY6^X)9I6!~hE+UDn8k>q*K#$#k-ME{{{k4S&{&f!Yo zkR1eb)W`fyOo$pylUv9_M&W8(;K*iusB@kzCgkYtD0VkhgG1oxnWjasd$i)2gmLz9 zr^hQ_JyYIz8@A+q>UK!{w(#-5wbNX$_rBV*?h5I!80A|hnL-%cpXWP@;^XIVt^$>5 z;Zh?_RB(}@xAmwWEpdSMG5h<=ux&;)j)5}uw-Mz26S3mAz$ zT176@S+_RFw`cC}0%%zDgHUmQDdv)&yDi3EKhh4LX`0k(V0y!hbFGSqDjYiqb?IdJ znHZCau;evq zFo(RSTqku#d6?*Qa{lAm_y|J73Zk44a~gvGt$s|wL9rtK&n9XAPqCb@n-*^=e`8C! 
zZ8x9HomEK;(HuMlbM@QX<~m;KuD$an^`g%0%AnP9Y1&h^o_bNcLs;sNnAn^5OLprn zP&rcbZLzwA&x}PEIwboZ8QlGyoUmN_%^P-pjqJOhQ#$9*KazKxbNcOzc8kvhd`;oB zJs2Yx4-Zdv4 z1czpv2q~JoUcO=9wyhlL>el7@wk=jl2HUpwt@B9VX#5r!0({SulmGWUTK<2d_9|Cd z=!njRG$w(TTRmKyj=%vOI9ia)k|Vb5+a3E`zA}=voY5S%7q<0yu!6^c7?@&R8vX}$ zGb5=74F)v`CCGd3+7qHDG!Pn;^>6w5#|7<3YHZbLMXm=Qr`D^!bD@9Q+e@@ zZF3n#piZ{P0IIgiXk+4KgtTG6w4md^Df66|(0c zLv^4~n9KQKy~y3lFBi*^!V>PH#yn4krhx4-WhGK5o`NQW1=hk13RmX3%h$5QwIDHG zOaq2u>FGrGyxf}*e=W6 P#sCDKu6{1-oD!MCxNC-FxU|?WKVxmGaU|>)_U|m=M35BlzAttQKD949wEu#g|40<$-U9U5#^ER_WPcK49-NP~h;x z?|k??e3JND-iksCh{1usfx|cRHvjwmP$8P8cSn78-reBP{$P@$H{8YiZ~@wBZg1~T zdG^@815-JsuOuc5Dmfe<7zBz37!(m0`2T$LOu>?#(OZ6ma+Unb|36><*B#)*L;>$; z{q8Rjb52P3-KWC) z{CO^D%v)~u^T#Y>E=imjHL7i^?JYySsgb%!$_64R)7Z?di3u&(ShQ#KB zZ%%5PLdmKIUx!U66#U#b^X?csGIwci+GA5FE8FL)l^h*Cvc+^Oh>F7RznH5&-shsf zRqOvzY}!3DcFN^{Z;7OfeI(qJ{OBWQHh5y#&tOhmA&q$aEGCv!N`XW87vFLULb>x$l=`|>nY<15Bn?d$h9 zAAZ_3B_@mPvC_^Ng?>1f}U} zt(!~TqPfyn|AveV@v$2>LiQFXGo3I}W@d7SKt`Jhib(4dIDt@~W{j9%G?L6WgRD}& zFGf3hUAz8_f=`fbiG@Z%;V4Iy0fCUuAJMk5*LJJ9vtoN2QYHFbxm8V7OUaQ1cf&^* zOZC?J(j4e z#dxTUvLTgvXTRM)+tBTf2b&z#ZJf_dP4ec#ns?4vxagUi0kD!VLqi#V-uFf@Qz;C-{cRRW;@FbWOzfL9h?az3|;lO4Y%*O>_0d5x}H)3|8~MfV#WV0g6`_CCBI=>G!_9ON+^UWxtzHN7o|e( z(Q#S^JXEYt(aju+0sX-TRrHipq2kZB8b$;#dKUc_1B43CCIr0O3c2gMqyt*7EOp+% zE)8}i+#F2slqM4rd?+u8l+3&kp4(2A!gs$+2g00q0ZGa?^b|;#rM=gb;ma9+mKc zr*`VVzz?zn;u(!c#IAF3IiI+WVCKSHyt=&{UD`u| zbM&k(RwgY!&?A9k-n}SgKIpsipl3yZdlt0>#Ij|(=Qu`8E#0(_?s_yPnfBm4Bo$5X z=$MCrcW!|Ryu7k5V~0OTmLmW&i9yHZ&yG*gE5GyNLo3Fol4^EiUXc3i z%>C8u2|1}G9v;#;YR2PeAjv*_Hcq&W50UaZ_b1Imnd2kyI{1iB+2DqnsJj1b)GM@8 zTOv8GXyeZpG=7p&NYFK4#9*i>1FDc2T$C`+;kIQLqNp`R;*kTkm8dvq6$%B;4cycx zvglf`G-s>3+Pt)a)iy^55|}JVHQ?mHC_L0r&|#k~Mt#(4uG$H|asLe>Q%V$lSLrDq zS+PK%!fV^k1lb5-K0eIiEZ}Gs=z@bDAVLuZbPGK?!KVz8*pd$sqEBmb>?gjD*DJVc zJg)ey(V0i@HIRyu_-3btaGYZA8#5bAy~V5Ls8h2&a6kdX0~?CLFATfrP2aP@2vD!) 
z{vcPlT*-D6&6m6ijlA@yV)O)q`U}5w-<60^#sB07d_?dmzS2bMND3=vaRIk&pg7*fRn87z zTjDcs|7aihY7_wgn4z$xL|A#n=AygWERrEA%$WWi!qfdkM$jFEc`1d&R4~uz(t=0~ zQJ-CGK#kr=u|v^71#B+`MXtdqw?VPeno`7KK0>kCDfXgpS5!W}9C~q4NYJxcnA4}T z@LwOC973`BLAqX&^xU2bt=hRhbX=x{iJttx@Z%|7dwQVo@lg`dK_Y;I9$Y}5Zn*P^ z;y$G7a%Xr_$5Ca=g5sAYo9f~bYaUQ!+L8xzkNKZCMeaGW*dv@AqE(lYg9!DRh~fBF zuybkjtw;|P)+^yB_eT+UO8LW`pX8sEO2iDj55FHTyiGCcjvh?^;PsGkuU_bq&fr&iM6kGS`p~rg23h|OT;xuUbgsX58j3P z1_vY$4BQ=vKWm579;eSZP=tQnsF`Dj)_%pWq5o+EFft)jpn@VX)jTW(82|u4iHmT+ z6S#&6&qlu^8~#=%SPvf5|MNl?-bw{9&p5OWBWY!kPDmzg>KD%&kYGMAcp%R4VL=K% z3ps>;6&Dp;%GuBzlt2vTsR0EeDLJy>&X8kEFFFkk(l8GeP#kTpu@OH137G;1U4%*idtih9`;Oo9*#~eT0t9s$e$k#hY1QhJXUtzwbDDA{ zav=ziVLUvL0r;H|vwV4e(*v6Sm(Z3m*SqdeCPAVM=r=YxA7X4pn<1o~p6>U254fn7 z237}z8ji0e8zOvw-ACEfu3l;=P#IN>ggWbD*AXJ_SIb z_h9Oe1H1^BA~Hnb$NePspFA0gPQq^I6D#NQp&mw59?H+)@SuPfao}d5&VA z>4r5H-^09?K}W@9bSji~0Ve$iBC*hjNN;2JN7)l1;K+d_?zL@_MhB|6wNSQ@kg1=(x+5m4POnVk{cHW4=eOvHcqknfeQ94RbQ{EqiCSCi_LR|$rWi_YC@&#C z;mZl4+VC7uApZZRV1ACDmaL>`sEC|ff(^{d;}A-Y8RZ0@W$?LPYy^tMacX4y=M*0c zJ&Th7PHc7cG#0vW0&UJ_9e9PqEzQRcaC4t2G=3EEs4{J~%{CmJgGz zaMUR~ppl}$J;moXc|PoCY*It0(+1X@mW3&u_0@*G2gGzv`U)gqiNf=u>x;DkhPOB(q7>bp%Q7XtTd|M9qfS9MCpEr9T~f!spoVz1z>fv$ z@P3vl(GU#UzBG|66=9UUkX)(c~?~Fy&del})AE`6b zlOrjA7vpR9CPeI&`j??^lokwHp_V?UTcY85FbILhTP=FfbYtZBETYA>zhO7ZzoIuE zMSwOc-9?DcDXs9WqTwZ5|MQtN2%5FNsZI__f8fNhdHPeONL;~B8A^ii?r?liYhG=O*XKj9~a)+Xe5pnWK~ho)NqiBw5U7>F;`>wjfmC~`?GClNzLt`CA+{9 z9KQTe!-qKkbL*XKR#pe)UBP|KK0^0(_`xQTLr&`PcEtG`+N)7U&f0807Otk^N9_1L zIMbRBM&*Zu(4)UD#kpk=O*4FsDn!VB!Gjc#Pzb2QklKG0ga{%cXbx4W?|-;R1Qu$) zyjGM~$Ss_m5gy{C5G+zlX{?pKKHHqjsV$TA5Xu%B_4+=Ty0>lICF&lEgVhB)`Jl#v0K=C8Z(He<JQmgu3~l!t1zBu%_R|vx*}1n(RgRZaYK|dUf9=Yk&S2 zRhv)ClDSi!r>WDB?m@i07NCx!^a-R;F6V2Jf<#gMJEOd!GNDje?{63j8a z)9F|x;V;S_tO8VpxW`@iKF+CvXq7KBs=63%Zlf{|4Y6;2biJy%K($^rO6Aq!#(OSq zlF(ZS$)rq3=$}L3TX3G7ie2Wg?BL3Awsi6L>8tAH?PY=eiSXMIZREci z@1Fh;%3va<_oG{*_2xo(wW9t&9KfFKmHT8kka4-b|%fagZ^qHj>py{%*2I?|5~La)+iqVB5}^(o$g-m$O11kyG$s 
z&lZXnkF7mQ&l))~Fi=5WWeyiVJvXS(RTC}<`-nlxl^0$H6yJcVmWYUF>w=gz9E2b+ zsL!M=ust6?LvAtJyXe>Ukk}wV2tc&!^9P1OooyGEsfBBgKtg26mDiu!oxEYAk}}no?g>HSX-56!w!x4pV^|g z-?_ROB%pv27txY1_!KUK8;&)_e7DWXfzJAzlyR;uBWdJSDWAYLnVPINt5PS2 zq#Lw>j>}Uq^pYRGUh)_f_fjHP*9pcmGP=fy6Q$k34pk24RvCvZSVbJ~dCJ!Z2Ez(| z=}eIN4y!gF{UOUYJiQPFzYT}4H=x?>o%kWK5dF6u{B%=kv%IX}TwRtZ?+i(a z&q$i@=$o0GrW5J9{2Qz_q0Z@F`8M^`jVVAzgd*Ykj%H}oTwZ+Q!1=wa3yHh4Tygi& zj8_{CfH5P-6*tHNMvVm#+vqJ-w5tAgl4X*b!A*r?@M<^ za#5Mj+W1c*2!EFO31h1&xL7}0NX%@QNC>IDU3X33r$G^lPvR2o=Ao@*^Ky#niLlZi zS(S|lKJM-E#$SKGXdkHX@U6b8>!dWeo+{bV!>AqYHYc-Aea=FE_54*Wku3Y=gP6*^ z(-d2Ni2f#vh(Tkexbo$ktIS0Ky4?aj#o2b{n8FoG7^UI*!1?s2kX>#(*JGx38v zX1ht+)q*4(-GKVdl11DgJ>rMi@HzDDwIr2~HZtFmi|7vxvZV5a-yz;2qk%F7U&|tp z_pfgemhPUjI0ks5U1-vE>|DUs`v49cj$ECf#_z9p>9VqZO1vGOR~;z*G<&-P&tX^N z8Aq)ez?;U+)k;o*98&$I*{K5OH|7lOqFw00SLFNZX`J5#Vwz6o7S%1%=KSg3bnI#| zW7y6#ekE`?d%;OmusTrfx|NHRmA3R|93rRt+$pWJ21&VBg^05%&Ox&81@|W<@y{^L z!}{(h2gM8>ERav(n_^yni=8EvoTPVajGxV`H@cg+h6q|D+WlQWDRlQqXNBF`qOwVvdW;DJHG-MqJLm z+z+|Z!YC!a9=mcS1J(J4Oc? 
zv899Qm5uy%d|LiaQFWt5fc}C~K&fDprwRW$MW~Q{cB#RCkBm$7?7ETjy{p-q zccFplYpB*#xU&A5yn-AWj_yqw*N0bw(Lbp;AD!i7^yC{XIB@GTRtY0yCyF@E!+8g) zgtCQC(vr^62?#%l3F-tqYMkae>Sne<2>31jPWm!BvGNM8R(o%o;iZs=bChqfNv3R2 z`TVp7e{$HZWWH(((tCfn(s*|r)|gp%5sjGL1D>m3-a#QrzM*ZT&DkjcX5C>57IHd&QcbeZeXU zovdiIP4cGAcyeENt6${uSSySPS)tGl`7qV8v3$68pj6JL;@3j68>t<$JM83sngFx57S8xBaPzyXa}=^6*7f`dZOd-&Tr1@H~`52Ze6 z^c4z=b*mw^YwF*foZdW;6(?-xjw{WOAU6_8NVKaQ8x=}lJ+xI)%)|IN9t9t6%AR{V zW4cI`7nisrH>!VL=lp_7m~65&%5QSo%hJE|-F~Z;h<3g=(QdTW0b%`v<^a}p#gs!J zR^Z>AS2_W~ur*;}Sp6s5#%yu0$3NXwitgF$m2{ZW-TV)gKDQK_FiN^@_BPS0M_sAg7Fd4sT+S&7!Llsc1sK!GOCw&-MH!jiu|S zk~U&dYb?2a5~L-DvN9sd6xr-ISz&7VgQE1afB+p$SFztG%L4`Lh^FT0QrDY?d1%E1 zCb`r`Ss_w;**`I>LTk%btlv-tQBP?n-~J=xtNc+xqcUsX-pfMk)WjIp-ls z)l!j7M#6Q$_t0|wAO;LrPE4J#g!^GNNW*seL7A+1hQKsgQYSuXxw4|1fabD_vf01E ztL@#iIy@J3B|eD*Wl9^_uMBB3&vH|9X3F9!JFNG=WE@hfDI4S8N4BQDE@~~_;3%vO zp#1Ijhi_xk#V`l6g6rzb2fI48+p*IX+N9}FUucO!WhU7O#OR@jyKCq5V(cj`X~#6l zB>Q{Oz@dOtLCZ*`^C>}D<=t(0_Wbh>V(X|h`n(D*j<|$oYQw$=JAJg``2WkM3CZFv zcApZSSF}V+rr`F}lT$DPT+%>csf=MkR<2&zp%f#J`p`v4IKid42o+cT@K*joEtQ)clSvloUY9T7T zQn7s8#k)$wM{L2-Up*y#jAPhOq<|G`r~1n=A3lj69M?Y<+DC}}@;+VKB!3&lX{g?? 
zf$=0=*cKTS4xA+;n@s^;7iMU@#Nm%>?o0bnSLn2|bW+@0;r5^k)L z;<%fyY`42Fm_t9m#xih@yMvCcSX!{DsN0=8W5+E}8R!09SH2$F4a|3Ts|^ zQ+U3EA}Xslv}xb9dPdbkS5lgMW4$MgWOcRki0bsw7+FXl#`f8m#eF|`Y$Z8rE7}wh zxYU@&)Vsm?uT$?vj}_3cxS9I<>eSzx5=<9n=F*$2T?4yLV1HeR^j&dI&$&ju`ERPk zc;9|mE&7vPVie6|VMalSC6=t@8WCzaM|F%TdK1A;8;fM1PGP)?SUTK~iU}3(c1r2W zy6?7KGhw%vN=yA(rnif#OjuN|qDlrz@oHK^SytAUbcl#$Ak*oMD&cowp3EdnAvB5| zZO_odcG!@~{+3;))4>niFshE1EUx0%xR!9_$9$a+z`np-TUxMGgwB*G?_ca!I;y}@ zJnCsf;^q=VGbiL7t(mmh%*URx0)zA@Im zHioo0rP)~cm_+iXvP_~uUddG(aiYxg&o$9XuI_5(R93S_akp~x7uR400ygf)U%hJ_ zaGaLDcIQ8W$O43CXz0{&U`2J=X#VQokvFR567wTd7i20Se+RX3fVJfkFLku`Vx;mdsSmzaA)JIaYXcAA658U>@hx{kDBM^1Df{UZbv+VRuq` zaztRaUVRlozyQN*pZ?^Gj{3G514n1Y^ozO!b)_q`8q9mQz&6W+(~Zm^&p=LG^j}!W zOBIu|$vK^;hL9TdNd6^SVY}P)w1h6@qB@Z1rgPUy{1QnCpRfvJjImvXlNh*b^A*OU zqcsV2MTdedsWlXPVXZNvxZW~8E(8=n`yU@9L4P=FulS<2>0+5P_l*;+Rx?oCeuHUc z1uu|&VBwr{D#~DF8#PUXYrwDj@>GG_Z%6@HPlKUIjmb^uu=!cyo}RaIz3u3?=;dXV zZTJ>Xt?Fnc7LP|8yGLzidOS{j^Uw=6 z9$jOX@r0G`)~a`}E{Ey>|8fX>*5ZazjC)m3GO}IH%8u^~Zewz=2si!h+IQ{WB35BT z@R?U8uDnfR3&kF|?qstbw4LOZN;Xli%U&NCKq;nedYtZK6l}WrV8~=xm@bqQLJ{Bj zP0HzJ_uM4N>OrH_I*%-}Q|0FB+kP&U>%w5NA8T0dZ|=y~FKY>+VW?Mx;t+*al*rGE z3#}B0BB{gClPy6?roOua!PD zy!*xD>fFr|fJ9=RYe4(Kks5BQ?if?rd=K?7asPr+K{e6lFY*0VE-#L-GE+imrId~? 
zOlkL^soko2sv|WqPpv&`z9FRIDSrX|{VCq=Ws{QlluMoH{h(ox@rbT0?cT3rKaQ=< z(vky-n~dr|qf=7Jhqs2{_nULOTi(eQBo`Sh?H2hG5^plgFR>7CA4SQgPy65{T=~(y z<0xXAJD;RPQ6RrPHqA)(n2>MCs);v^W_nmFf^Oy(QL}OTJStk4pJMNLeL;ol+LIXn z=>rX4t`x?c3E6_lpWkS>o8lfUk({ty2itfL&W+@2v-Km#-y+QmvmBZ(-{Iev$Qc)z z=+es@68(0d-Zk1udwrpDIX>R2QnD>;=65gl{l?kSrrL9C;dRUN9ucq=0p)CYyYCIL zg#O%|0JnrYD*bZ9+4*X;4lDrj@*%yY%{>Dy=uIN}`$|^3yhO=CYiFegmDy!1gRuh6 z?mT6t-xH*V(MEGu>FtYmlQ`yoFs51!e~WaCg=c5=?GnyYlv;dMDbYIIh2A;JWRD0# zqdtx|bCuq$L_R=mh0?&{!5nVT3{m81a4V05O}mn-$qKh)?;DHaR85%w9sZ+xtRw%2 z9byI7msX{}D-#$@iZoddjoJi9 zC-}2%v{hF0-8Qu0sHs=wlAfn3lo*Yb+AQqthmR4=xG>t7oYQSt7`N+<#prNBNK$+0 zL66M+u^rdUjCmYN*{@XN*2Bx0ur+b280Nn1c{7@my1Q^ScNL8Vt9D7-iu?F-Cuz*N z=*ZVHUOslI)_N{5ic1Z3%LMvc8)J6aA5HO*nf7)<+RN7`hJM_nUC2^n6~HKl$NZ+^ zdieVML~$cGdoy)&anXsyR6kQH`!!B+e$%|&0T4WggJ9mzX>#d|xMMl73bU|8*$-S1 z&9Gd==``mf>@H-6xOeQ+=@n1#%gU++@usZDks@(TXE(B}o~B>Z3x*bBOrZh$YI&ae z&s|A$Po8^J#E7;uyk}Sd%zzqu>h0YRE@lz@zv~y7Ppq{a^NH?Z&d%7R$aZxJYo9VgvI;wp|evEkClGkw``k4CVI6F{J3W(IFZtSi$vHK9_Np|y6 zR=YWOD||Dz>zogDmB}k`ccE`|9l%-7wBePxXi`YYbZ|xIB)p12ZriiSo0T<$qnjWw zWhApPmz-3Z-K)C00k1_IHQHl6IhX&i8Ooh}>V83yq5tK}B!s#P1+wi#Vo5SRBl~AA zb~<2}5chZY{abnV@_nhUl~s<3+K9*yOFc0>OwuB5OX4d1YyLvhRLUM@nv9T8z*g)@ zpB%FG?v9;9zWj`!dz{0QQ2SzAZ8|YB+za#9VzZT%7RakZkO}w$hDJAbS%DlEiu*-a z#hHJ^#9X-o&k@YAnh#ugVBlZ+n+uqHh{TqNr0yU)9tC-^$(;u+9>`6T z&J`B5CNQm|8oQIAsh|8c+l>>k@Ab{65)MKqxO(gMwTfe1gp_8)Sf!d!;-Z3i*XQt+ zys{!WBCphU$!MVhZON4|rMflxa^ta+`<-6yN|U}}N8=qN((AGEX$HR8#vJ|8%63|y zhly&5p=+71ML!=V>^<$shdCK1vb=ifkE=LUcG~JWbbVkodwaMl-WTJzi6uz$5g7rA z-5mx0x<-j^PFlB6cRp1OsZFO)Tt}lIf`Kh6YmrTyGm1MT`1M1z(k{Zs)u<8lXpH@f z1{Y4`j6J7)Hr$rk8DGQEP(Rs`8-x+6ysdum72YaJ>foc{=P=Ng-DIjF~?J9F$3MoQ#XMx}rXIPInH54VN_vg-%S`X39T#B?Bs>aK!R*@&&e^`6d8n~3+a#D&*{s}5C(V{}q zSi&n%rT^}}X@X(C%@G}ed~Rx* zvIXyivAmw|C(S5B$2?SbDA)5;=Xc~EXY+VD?#^tE?0B?nvA!S42?;zlr7w3!nzIQ9 zwCj#1c`+OeW}E90!_-QUo5svJ`YbavLWk{wwfRAr`G)FtB#TFKwau(=oiV#4#^bqF zceJU#DaDi_fp>=(iw@mO)g_#(*9pM4z;x`7V|NIZ4_8hJKD02HGGaw!1e)coU9>w3 
zbwSzhXXG*qzFUQktId3xVUjf-R9{`YDtK$S9^1YAh$%f$`pM}gB)xDso0}?W^Fvqw zG%`91U1S7n^J6Ox&qD~$;Q03DLc>Z*-Dcz}$x)YP^d>Kla&PR&C0OUS)6T9r(*8GH z>-@-2oAusatYSrrE>+0$3l)5$M92PgWQ$p{I#Gi!5(-K|+~y@0O>4C~2l#uvPU|Y8 zgcfx<^goD@UOKe9K!?^T@h!y@#RgzPz?IM`odNWw0NG1cBC^*VUPDlM$dkFS3shC( zWgoQTsHU}MXx7kT(aEzzCCUsEerM=ON~La%i!R8BLkZr3cDuxPfZP?%i7sQ3)N=m-jeFe2i&xV|!ppPSp^U)8v+QZOFUW z)pc7IO6}`099vOB2m*-9yW!?yQdfDZe~k#MD2Pe8-syFHK4PD88mQNY(R@2wLj8A| zcBZju)oa_Q0}fIYNI~{oNrl-^baVvd>L zk2o+(`R90>Ewk4+v^WXb8hKlZH1K>Y)!P13<9n7rSYa4p2sphglhr)t?;s%{_^De| zArdcJ*fWq(2C4HZ^59^cjsjVus)qaOtBj?l#>yp=!-Rs8W3q}boCfR5w4ZhXf&DJ1 zI4}qSPkUi$g6H_?&nNWC^pht%1V2p}E3Y@5qZz_+Yu+{b!jN^2S4@|neED0IizB)q z7wMy;=?B>OnV3WZDzvhfMZDvelt=dwE(eRXm+>COf$cJWy0cypkG}t8rjkEi9fYpBcTwzuqfP*rk}z7_gN3N04v%B6la{>tRt4yTk}5IEXHusKK}#MzgXVJ@^LY`hr9BBwB3{S)WgJv&EUSLk1Pef))wqk0NXF|j^uvgT96{y11|-L7&FjmZxW@j+hsQg+&+GF zgNsol;_C974BTjRp=5uOo`L~LH=?I~;d?Dx4}S1@lI;bOjeUU5dk_P`qZT^tcUs3Hdt?XN)`0r^2!QLu zkjSSeP_x|^4Dl@d2+<$8uT@ii{CTHSK+=O-|Fe)M&09graXQ`|L{AS?fAODuxSbxg zIF!%O*a8&3{U+vRaPTE`x+~sNSa|gnM2G&YN z1hAh>p})RtXsd&(`pC;In6xKz{o@;HTMu5Q6|u}@Z+7**_%S#9(wMar87& zp{e4TIIwvXsPBpk5Wm2nT!@!uDRa^Zz=<_uR-emI0B=w}gJwR@YZWaS@LEY!_e~BQ zPDvF+bbvC#Q13Ci9xPHlQ61mN0fGkL@c|xJDR^Y({2yQ5ERfPa!{@(dU}z;u)&lY5 z5`=Iy_GmWh7p=M02q3I^@Pz`JI2ZNC2O&|*9$eo3^}?*$gu%s5${m0iF=!=3;bVgg zDxHyxO8!z1u85R?K0HDcb$~MnS9}iO$H5=cy2_#sZcCrYYQR!U#bj0}`9trS^X4*NLeO@MIrr%}>^vvJt-{$`8I)LSX|fmqAlGkickO?Y^1*N^(G^z5D@`uY$iJPixzPpjG^FoyqO*j1r5Q)tnIRHbU>eFS_FaRMc(U{k^?*0{#Qt7sGg7-yaL^`i1f z@r?>FdH^{QD!Zl09b{)(tWyrp&LNYS%r4kkdEIT8maW)Lb$js zI&@h|7!|;cT$Q%jx$cz2MXS0ZR?(s2?3Q5vtw~I^bhABl^SXAgnsDG@-HBZRPeO&` zmiMNdYwvpn*IsV@eSW6iW6oZ)Iud>aA>Dg;)K?LQR$OR8?#%uyn~z72oe8w%9C`vlU-qt%@O9LiRMI5SA4VcCEp|MO^v(-cpu)l5#Y~z_(3V3+A^2^k^3arLk^! 
z#(3$m$;FwR1Ujw@L8C~c>CeB&% zd@=`0J*3~1Ts*-m>t55a6=OaX~ zo?&9sZSNH_Cm17Lmz||yz*^>}7M52O2>&n|NZriMwp*k9X)}NLEy0vPMMZ=f^|$7V z2kpa2?}!|R`P=4c>B(P_``^cLLyHj32~#rB{sNU4IO7Asi(jczdF z7EN|>+c8zRJZSg8N$z`U9?s1zk&oY9b;`bIVfUwUe{Z@(6qnO)y|wCx@Qf12cY-R> zZjSiV+{&!PtKYEovrc30<@C;MLMhgm|0Pr!B1DXTTs6m7|c6GUZD9Z|igE_u=J>;L|BN`BT#gwxB@Q9&gs#-VPr_bli_R^+6X?V&f zhn2zupc#NcfWJr4=B%HF;#D!2pAj%txpyCf!XZ?ws}86x3JiJ0L?#wkg*{mc0m|b~ zHInnA5K!m6K#pih+jFyt>89Y@-KJDmUq5+9f;@o%CmR!S8>6Xc!o}UGJWNiSH>uwqQS=#$bpnTR>0O|r z4h*~*@orK0`kt=+_aaWw=f05bOY7Eed~CZ^7r$-FE2H>b@G=jF@NK*DxQ_?44L^HB zy*vl!ll2r_o#wxqKF`=g6S}Ps)A=Z0@Rm!O(ei7DO1;nBX4EYmAywBK=c{{&Ix7M3 zQ1x!NGKQ(*skao*hX8;IuLN4)!CPKSlOW=sc@rQG4+_%0KiD)#B@1E(f0jF}CF5ly zhLO%!EQ%-kDIQRoTly=FB6fNZ>!G49rtDBnB&0BLc3RLcQR&$i0kLb)H)s4bYms2j zD_o#97J*X0&q@8|V41P2$uqrqr=Z4-;7w9*q|U(cwV<)?IOFW}U2P+b%cpyj^z$Lj zh+lmo@m(*AZos;D5SSOsV9BqKzCD+gKxL8-u^T$xhuo-1v`=aBMm^~Y{;*N61m`&RSzSjlNQW*ASXt{QQ4oNlZ~vuG+@=!dG9HsR(h$3FK_QlkO0tNu4C?qe}{ z?rUq$9zWiqhiP3K(Uqahe|wM~d_9Ojl02aY2M!8@9H4=sq`AO1=^=yfVevrLFJ%ZB zYi)Qx$Ywml2)B@1#nTD1^y!}6vj-dD{zC2jwM&qJZv4i@Yp zIpfR@qgUTq1MQlQ&ZPdO%pX{jKFV3M$oJOi7N&YIo#?JVE-=nm6%d0fFnf;8_a!A9Dr`hs;%7IzD^pz@ zi%eS9I&jpjz@t7LpL&9mfE18QhwC?TK7e93UyCV3B+`Cgd4AwFy=foPDtcCz@0hOQ zE7|A}sbzkDi$d9jSp_lHW_!_y{IdWjI0d+nB&@i(`tf4!`tI=LH)8HL3o%J(W!#x| z_{{gL@vn?En%y8u`RkG9{vG`D;4^w!DBw(C|2wPdDDQz}EGXq_tLEmcZ(OawjjR?$ z|G4!RPcVqc!S{L@@is#J-N2~nR7m#M*pY*9t$0|dqWd|G^;OqO5-nC@PL=Dc!Cakr z$ZS5$e?Ah!2L?`zrbdR2`VzQ$AgFn$Ba$B*KIfTlGgjQyYHPoqFC`1rA*t*1hp6Qs z{#xaYL2LW4I`_)%e(OSPr{gbM`$uN77GMSOjAQ<%Low3oU*w(-#SkrVdPYx0kDW*% zu9w|>5$xDJ={2_9O~ccdGk9f*u~rYU{P`ZUW-S-L?;?$Sgu|$}2Hp7>R(Dvvcrl#q zS^Y#>JU~$R<1iqt^B-4UFj&3ks;`T99q|#n?}M%=*L=*Dunw)cIC6JsUItG2>*eHmjnQhrMje?e@)F* z2u(b$cf9p^<7Ed%jaw=mzy_D#ekIVglv_mrcm>_uPX9 z(|#ELLi3-e6PDM-#3nDH;V7`>!VBg+Hc~im7cKV;&DGL^WxmELEa5kv{+3C`VfWt# zh{Q7S^aBkk^TfcMjzTQG{zr#nX*E+~FW|7#)091~fjG_h_)!Sy#ZCkmY=4sYIwAQj z&GUCyNY2XOb8W}kX!K8(99cA&KLuYpU=>VeHn zbURKkTrYu?Ci$!n%_^2T{eS~0HZvKbXh8m}zQLgW0R6JC>mqD%U`_EfkE0y0=55Uf 
zW6w62;DgCCQYKx?D zgI4c=jIF2Ro~&=oUZSLr9~dF^M(hwmB(K|7x{sB1cKOVvs6wns(PCdwwdXIi1-IZ6 za!jH&dCKM)eh&BZ;)n!wk^dloF?ksi90Y()uO$+oBL@=c`G9bzFZrIkxh|M$XRZ$a zW*-}S2fzP>cF+362z*o$7T3aYov5)qU7Rm?=x1i=VT{jIAWAE!Ujf8yyeB=ba-;&s zX8~w}frr91RHc4aE-mvxFr5{?B;;Q0T$R}(pX4@Eacl5d(5^$+IDoo9XYG-AEa@Ls z*=nC7&)r<>OjS+gWH5U^bq@!X0tO)f?)X)j>;(Kb*?k7)jYx+srB%ma1Z|Q&rPr(WwJv6RoIPdZz5=3jtI z0uHbCF#YU#K6^>Dr0AKpqoCiJr{iq5+H2u0eCnmn#-m)T|Me%w2iAo^@br7ALlT*r ziqUoEo*%e*ztrj)H&tlqL0ELb76e1Wl9cGk!*ky)z+2}Gn53eG4w167|0@T8f)hv| zU|8V7FA6?W^r&ufw6}X-sHFT^(p-K1HDUN0y2UCo%{mS!ED091 zMA2>yS|}<|c4MtD?(lajA?<;t4qbmUtenOgnLaF4G}FNiJ~wnxv;eCw>%YV~;K*z< ziy|crN(Mo(g~NAJ*VD9C+f(Qnblhe8YB7@l9hiK+uacYt&Q|5A&X2X$4O})#Z!}7P zaB(He_Z1_2203|`H@lcRFK=J=oH?U-ZqtQVRmXT*O)5WgdDo0nbz`pb@$X=7?c%Sf z`025^=r51a`)iM!3g`7^+MAS7J|t4|;I=BCxkzQ)jgjz2wHp0v7UNs20- zYn%C?O@pVO;5Zy2#iiA)`5RyS2TN#$u=F=Ql1{%)%7CxOGBQFW`kngHCVFL!Bl>S)YIOD18pRX_SgMy-h2mjGh z@h4y>A{ahOc}CXw+l?cX|4|=9Bmmm1I<-{!=_0`&IZnF@ndXNY@;q*8GNvXp>e*k6 zC1relcJGl*-Kcn!%k(F|OeB3?930!>EfAy9tU6+L!If&?bm2^%{#z27J$89q@pDy} zqbb>PYV5ed3Fo1`Nqg8$sp@VH#&yQ7UK^LUdD3N?XEWdLo(tJ^G+HV?2tC_;>h)N4 zVv>l|uFLIq_m(j%)_?_ z{?FPT#i1eV*OmD}w}o%{9EG^14d8iP_wT; z6)FR!*e&-j>MdWj?{c&ht^Vw5tT^dVIiI+<4C2l8lo483?^&4ef1IDJCp)$fZ>SG% zo#Yh zC)zfBkuLFvIx-9q7y>4aoDb^JBa7^!+Aym^fv0ns&CQ^*?_myFp>OfQ)$-%9r|cv5 zv#|Q+W~U6-5+VQ`K{VHkaZt~fJ2;j^L0KBXHVWp*d}u)Wl;>zPHfH>DL+2n&p1_KQ zk37WJa|>{w&BnUQFW0-7iia4<0e7dg)1|xUyt$2vt9PS3$jg~a3-7-6T5&y#s{ z-}RaN*?gG0w50_;Igue73B#mFZ5Gvnq%ihqLn>#_>C;KdU{DxMR~XB%PgpP#7Xq6m z3W$j1`VAoy!C%F%VXD6bsT|Tq?zb$2y?sg85=JuaG#jWF<1+l*$R7h*3T8ab`x<5h zP3I3xB=_ep!S>#iWks_stA}e$4SOW7Oq{6wtB==tIoNSdPzQ}*M2Jhr;QTgGWDKRl z9Wkqc-Y;3P+@Py%j7AT2VeTy~nM;FFZsdr@oBh_-B$tjuEw_xOd^WAdUVfh`Io8$6 zk@zwUq4GopU5lYddl*^t^t(vL7lQ(uGt-yfaA|O3KYZd9bq8wk3M~2o4&pAzKlP0Egc2_BR7*3z2^j!K3 zoXeeI6)+WhaI;wqOTz7)-Do2^a86-FwFs@jbD@GW&lrs|j>G|47 zQjr>~bMkQ6aVQ;-1z=_d6;qhMAq6miJWh7S`%B-F#?DXQSi3Ov0tuC~K^&F4{$LY4 zOo~|96jS9@)NAh75Q$9QCEe85gpieMkcF(K<8MvT-r>&SeC+J>WcQ!r=1mS{h2b9P 
zU8iAI{pFk4*n{OakUk>4FQ7KsHNIY;Hgv418z zpM8g8SBB>-*KIh>a0Gm`HPX81NVY*)NbC0pSN40XGBVb=zYpKOb>~idyp)z+Y|fui z63z5T%S5i#`Slb%7sM|7b=z*bsy64ByWW0h6w~)C`y${>0|CiTnr{6Xp+?!5#SiQ* zi+-PKPq)=c1KlNx>`NTjI5JSFg|B?7RP=ocB%1}s_}%K?y|!r~`WsYO0hSJ=hvfO-m&S`6j zcx?9n%8c;k26Q|JzsdBzf;qZys0m#8Z(JXwZf-#hOmRy{b?>1>;0h9xFn9WWwTHog zcdlJ>vN>H+I*-7zl_6?#ugh{jZ~B7x3tzy#RW2$T{Oi@ti=$^q69R3OjhlA&<;3VO zZH8W=!rSWj6fZcDL6Jk4 zP#3+hUTt8L*HMBFq z(SoSC@QU>9d5--xw_2Be_raxy4ho&|?*T!!n@23Tx!V>ZK}PjWU*4~SgS#-C)<=qC zCViwb-?0UBppIQ6nmXw~`Z+CE&qGGj_5Ct3b;8ZSceT)D5^v|wg(Xuv^em#`pFnj0 zjk9a{w>1^e((#b~@(dTSbbnfQQ=IvmM}Nfq-L#u_W*0|p2`0uUL(amU?dn}UwX)qa7 zac@iRHl?3Kq3|0@HZM>5$Ms&_Q#NifW{ zEcN}>OWH%G=;2`VXO`y4&0voho53_Xc?md3>0uZ8M)+4R@8EX2EFv1dP{D9-n9f3h zPuS4H!n`f7zyJjt6@KC4zD9b$X&TSBMfsv9*NCZOLc~mn(~MkEakKnJXhx7L*ZDPo z=^rxH&63#8&JG}`Z3q~uBPH+sSusXA?nbb6+q}sY2vfIBDf^C5YRNAaso$eacsJtV z2FlmMj&S)$J8hw-J>Lv2FuIwi?>$cO4vjSVfyVQG8nMg^-F&+#lD4|*dtsPvXIcV@ z4<;ASf6vz`M2FhH<$_a2^ni-Phhy`k)niQh_;lPuok9sq1jRm@?H5 zWYGORkuD8m)#lG=)Kzat4aG+`)9k~mw&jRe#=xNEYj}K|^e{$;p!L@ys6{`Wuc2Pj zv=4yjC7rOo=j#n(I=qF6i+*cdgU?i8M|_*i<(&NnbKd#kq=6@nNuEQ>XG&i9cw-q_5Pa#7&9P-FP# zBedQ}y=u<;%y-ZIcVJ6jUX6mBiu${Cg7>GX2o1+l$fLxi{+BG$5opHTEOAqXDeBC^ zO%;nKBG+XYPg}M4FZ*hO6(?0ptK~<1d3b?T6>u=wSkx#Xz5Y;N$o3d;P`iY@YRYXY zO3n{DuDdw^QiS{P;EiZEK}KPRv&}Aa*_{_W`Mt1lF*>zE|3y$;MGFmN+Lbq<3esQ1 zu;zNjko}mvnH&|lqyJoYP}b|NPkTsiYCpHFbJs!G{0$S8lR^>-Pz>u9SG2iAq}SI3 zg077LvuHS3qfq$SF?yVUzuWw)Y2^{=g+UjnpM35fv3j4|z_dW=VfIT9O-x-Br|G|S z9K#>jS$v2<;wuQbU#sgFhIia4JM^nCbpm|32F2;!MR&%@o5mLXwj=~S4r&*5na!p3 zDsOh1jFVD*nL~0HJ9wyD@mo}ECg=f6WvtGP(*Tkg-wa=%o zpLANJZx+XU?)r{(ePp0a8vEDA)8Ci-?Q;wtG!Rqp;6OyuPPi%Qyh#3!_Ph0MY!8#p z=VC%`=hAh%F)^}#n@JDjsJv2DCY1rBDripMQz|^;8+xA~YB-6&shHKK#Z*Y~UDycExY6t(8jLm%FJ^INw@C|0TF?Ppn-& zw4+R+&3aFtjsvEOW%9SPWAg}lrlaVz=we+vw{g05CZx0HTOxJ@#1FutI-G= zR3Px^rJy{3+CWC3g<{*ifJWmslqABG9!Y1%#3$47F*EewmUvzI;-~QOHM;Y5QQ!fG zn_N@v_0OvS+nt4PUm(mVuNKq_ifkSyF+I+htd_J8iOOqH+nHR0R{9?z_;+vD<_`t) zcJfI`A0UKWAe8|d_~MbFE1|12)l3B-H;z9&H3HTKsGdMS-rswkuXR5Gy=919j*FiB 
z|F}40fkBvIhdnQ~i2h39+KN+E+kg!_*Eq`buPnX0Goxfb&9|A8IaP2)bPv^LaDLq- zG#y}~rW4dmGaHoqzO56F0I`LMn}k-4{EeSq@Vhgf_R|yR3y7W;eMGX$5k$NF7)a>} zYiAnCjGq1saF16kj>Bv8ZINy6c6fJ3NB^|9i%xPG*0Qw4aro6sQ;I5G zSMuVIJwuEys3E?y2D|eO8tR3HncAye^0}uV_e=M03_^{ocrbz97dReVh50(KmeTJl zS=>+lr0k%Q_=zaW=;*t*P;V^pXddzziCatQZl}({;9&&2dH+|5w#Mr6yZk%)4XTe% z5C=aa;pRX)ydfkMgoMG?2nC`apU)3q5Y0lEesxLcHWS)uU$^3l23h%2V%f5@)iI5$ z5lGv=Y7f+}(1@;&CA-();PW$GQHSb25A&b6_z4Pm$z--u>hu$neojpu&fHXpYl@o<=6h*y zky`r;?I2k9*un6-4%*LZnGcQ`?N$W?|Exc4V!{4Sh1gR*Q)^(x{BJ^kHk{=&kxV!WsSG5gIEJvMGxiaFH(D|9!TS=^G=NYmBZ^<9k( z4F)*IvQJ-qXM<<%Q)ezwCLaAFkRO1?4uQco6vng^yNC%T*QJQ=1Q#ZV_LqiH8cz~J zS~%e0uj%**ATlUb8K6r5%FELMc&@S{lVNs)lZ;^1SX>l(ZSj!hes&PeX=~H9o3c>R zrR21EV!j_1hc}vAv$lu0VD!VlrK$M4k#-mw#?ZV?fBMK~czzD$AtTt(u9KcbTn9oy zV*}8bAw+g>ey5%lMPvPVOS*Cj)T3(7BGr z_z4tmKgh){I~z5;>rch%msE%!%~8A?!6H9u;H92jE-Bu7mkaG9gb=2%zbq0t8X`e9TuiX)AG7F!?F$UUsY>X8hJ`+`aq(lcs)qF0_%-Kr{bR8+%G zhW`ex-J1*t%^K{#G1FVHyhe~>GlHg)gclqnR0^+gd;83_rM07PYnfo<-~hU7QmT1$ zf1oS!y^=N&L7~a@wHJ$saaIB62VDyzF}-GyZO1+RKYQ&@Q2?k z6p@Mg3M?WBK zd${FO9yN4GtpR41A=2?H*VnAUfcA3~DK=GnhU8i*5G=yPVN{HFx5>_FLG%esWhv4yooY!eO<7YJR@t`+J5Qqkmcd>w@< z5=AFPTZ6EakdOq&Ty89*`<|?&r>Xz9S!^bFFEN>5dRmDZHD-8kPvj&yU<{b&7U1GG zUEAGI5CM`D@=~rAqm#csh!iPIu)Vt*x!53gsS|sF$i^P{6$)%L$LIb{JLtY}5qZ%6 zjO*AZF1Oc68yT5F5dP0B2@0{qwo>%&^AVqWU<}UD)RgvnD(wB$?jb*XmhW&wG#;XL zh!iQZT+3!j1`Y*O*#QZ8zx^Ejnqy1`Kg9b<6#PC9M3PRYCLIV&I0IEPH*7=Ap7}#^YZlnF=BKYOtl;eo zEVDg1aUJ0N%9I0wyK*>kvci#h20q|q ze=fU5>Ebu*{o)@hPyv81_7ip?ra zk1MZe2%Dp0TH_Gk2Y^hiwz^3oZ=tBHlO->lVqSdj|5VuEg2_eGoLHrw@St^P9U!SK zYJh`Qg!q51)t6ErqYq0ZMZ%s)6ol1eAxGHe{LGMTjG4h@UQ;3)yqi0spxcDH<|D3 z36gqRv1=^_L1Qq4l`AiNe|5^%NRtdfqdp(J33XmI2Z1smd|jXAq+65Xt|FGAwsv6s z3G`ysiOnZpq29*?qfs9~Bvp-0yTjN*z^2U&Mz_Tv?ie-zc?>a3TDZ3mVp@kkVQGs) zF3HzRDRD`wv~Yu)t-w)y4Bd z!BFM3H=^Ndpai(6%5+rTU<*%IGrGK*4x!?h+&cVRhfS9du4$V1SuRIy+y1hXmr==Sw`^R0EEX!G)A~QRfX){50=J49&7`0>||w12&TppB#lj$F1i0ZZ1C%^;}d-kCh6i4P}W=$0m71yjQ}*9vlS?6 z@ZZ4^{}{{F>a7b^;ehUqc$z|V=`b?+E$ZqWz6^Y-f`wydsqv$=k*%P*c^NrI7@RJ 
z76U58-D04Ta4#tAGj1M2ru(1L#x>RFhMg)gSy4~ueJ>IXKtqC{LRfMgp=J2249aM|ChX`)WNw z>IYk2rQCaOgt)5?Q4SLjfKUp_k2Wl9AOsPDx(PW34jWwPZut=3+%ohS_Lqpfd>V`{0cFi5(i@lni6M9igh_=4Z#NHzX63dAvFOr zkigutiu@8mx?-Z>2MMmzlrOJ(@86g1e>NffN`tG&Ur)<{`OowkKp&TlTMasRcS~GL zc_T6!AL~3~Z4pUVZtGD7} zSvHL|iID~TQ<$dSx@_7;__hKKRVNqX za+`DYW^{}8`IU)qVe4{hOV7r~>s9N){pGn5G2Kq)NAe6yZ_92s_xjdY;8RtO-ean*MO`S`y5&(8-buI%-?I^bO}+dJ2V3PN4i)Q?x| z%i!Rm$1QYGOmJaz1$3|Fo`bUk)hWH}&W4;EUGzamvjHGR5{mSt4M7LJ_2qoR+xECD z>u94*16u=I&z0uQ$>sajqiI5_zFxRAc8dNBUL<*+mI$N${0!$HB>Yo?|1>eMyAm_y z021N^N3LldR?HYZTco$uE_dbW{Slz5?j{dTvv>%#5Tc!nbD_9nMHl%Se)@g%i*elF zHa25>1aGHTgO8^1R~8?(U+>*V<$-HQVi1yG@b1yQ!u81S2MH&X!X)=G*VaxlBzAm0$mcvq*YYr*Ns)4_ifgYx1Qm!b3qR zwTU_ZbjA#a%7Wj4BQ>MKox_K)B#&F$%;-#`x`K=2tn9oL81jB$KVoGC>~&oFBO)U5 z@hcTHqhHO+gisfCuV_)t92*9!iAR%~Tch&(Ew}Nvmc4(m7mcI##|?#w6H-KP-OIWl zd{(hhuuH+t`q&>5wx8-Pl&_+T?1=KnzPuF64ED43(e~^flo;%z^f`3zLQ;UZJ&jJDPHgPvDK2#0<%Q=6jB5HH1@X zb~>eNa3fg_%_m;I#=n{&)uD-K=)=`rZ#@EL^J<9u4nd0QHtmcRT%bp?o`2SKSS-yG z%1GHYqdjNr{?oGkbdsu{vLt7C=gD$0C&PjI#tmyyKBf)J343|zof^*nFl zo8OoS9~?yEGyubWJOej32P-yH5csh`6C)*e-WbyWBdLD>)CX`rb54AkJ_z3B;-yWJ zXB}MTr$n&L%wW*tCWUN?y)0kGIEjXe^H+zTIX}M4O@}qk-69*^ly}vc=(gjZVK_Zz zG|h<-Cr8b-<%Z_Chxy1~<=1Yyz_;bS3^#w>9bspnED#iTJ*(m?Oj001dOn;kXsyQl z=-MOyle%Z6iC$^-Ck=XAq3D)6Fec?Z_O!Hs$M~aK4h$Kn+wuNZ(lrJUrRsTAhy{FI zZM4a}xv?Idete$aP}V3VEyDvZ4!@2M)lpHK=7U_o6_sHp7CPR;~*pt;q`CKPwyd8n^-=u=Cg$bQ!@Yp;_as3!ywB**H&BEEqHTB_ z=g0a$58QZ6b&wUzG!FG)jFW{kT?Mb);iIGjMuUU}^i!`}?Hk;A5Z&|!^ORtX>jK){ zMKxvZ*$OS7;OB;3i-6GFL1Zml3Zp)3rO-0jPHuAFe(AHuohd6{}9x_zQCI`~@^BM>qmndTW52dLD z!Fv98Jb7}%?YAZkW2jcQ<3Hc!I0iOLw6+0FL!Q?p>svnvWTR`}@ALp2ZnL_bSx~C! 
zR)vQIL_Qpx#uVq{7CC|Pc%6ibvpQ9{fu%<)SGY{6Gxe>HZa$1}Uc_AQE?`OFk9ED6mI%AH4aijOMaOliBfKd|54{XStQGdbHPsa4g!qTYqo8C!(* zmq^Tiysxni@T00-S|3=Z(9fQTAegYRLBBUzpmDm_f|)qSdE=%q9pW!nI!H4O@3q}S zeLt)CDPB;tDDshVS%xMq*p}Xc7nt<3Iy-I3&d^QngM^ir6sU!1Z;_P3n`rcTs;mG@;o8Vu3~0y)Z(N%h|AJj%^9o?sxZD z_v|uQ`}U+o{m$IW4^C78nZ@`w2X)Aq7+lV*`!+&&v`-uiC&5-X5`lvwfkjeUZfVeB zqj}PTBC@Ss8BIPX$u|4zbq}cB;^tg{TE9sJ9?|nECWT}t2wu)5Vn^6XYn|dws&d$ym%JFjXhNmaD z;Xhe^Z&K01(oJS4QX#n)cJxu|Vn)?w@s{gA)KX*Z5PT*B@11b%bw<4x+b&^i^~XZnD-`tLiS29r7nKG)t`o<{R%>1B zdO%lB1@XS8#$l>&pbgDhNrDUv!J3+@Pjwi_ zri#LixvIr57zt}giCM=cK^wD_%Lw|^QJT;bbYJ;r5yLy85 zU~W_3!w=Ach`t8(H-G@nFMt7~qOT@*0`FA=Q_>|g zT|Ye>TZ(&J>;JHGpjo__`DJYs!KKG>XgO36IYUkMjTOu#HISQVzqE#FjD$&1o+2Y3 zzEa$u&HO-&nIEJ&jBI807*stgYSp6%P8rnv+XH3l= zKS#{Oe5HJ77KAFAWyi8J%)Xz#2g3Mlj)f|Ga`R=OQm?^Msqx^y<8-gFh(FmWNUrvLf}+3P_AFU}>ZfBM@%L4d5FC%bynV7qQjS*hf}nhj|_X1EDH!?9t>=}op3Oz&C#sGgNqn+_lO|7u(c z*lJD=<)fud9-@AOls_t07N%`xO2r-B!l?ba2M6+_ZTpXz_5=p}ki}xHn3&S|Wy4wmC?V8@U7iyQcCN2JtgJ?*1-n2pLHr=#uXM=z1amZmkst8hMeHg=Gl2c( zzKoAIm0PiJL6@G3M4r@ynifv|w`IrjY{;c4%BG2>&hdj8e$y1Ql-juALoKwV%jUWokt ztv)b@tUVDou(>Qsc+;q%@M!l%`B?U}zRNoH8>?|x*Oz*K*PL*4=~?ctWck_3Kso|> zJjAL8l)|-N!ZdMO1$4*1GFXd!;arPoo!hH};yngn>9l7z*jhxuBx?gRXt=EfZXJxT}bNbz(n+ezpjkw>+ z-GI-Tx<=Ex<@}8%NG?69GuFm7sdf8?+YgS)0Ir}i^taoV*DP* zdoddSrk^&*V^3z80Ygx+J6F$S``Fu!_42F}wo&0Qzo=+W9X0mTYwu$4kBR z&rFp21mj<6iF$S3_d)qV|b`}M< z)T4wO-$`)2=;(2oQ*m2FHCk=a-<++RDDLg=9YSz}2&K(?TDDg5ymz9gI%3eJGP;dl zDPirV?3eCUr+2DYmgp7X@`Py=wSH9`2dE5bS=S9!X?w_s1^)#%=k`#}_%Rr)Y4;*` zx{L^bQ^w!C`XfJIaFlYN%+v)%wpui?Nt%EzaG&elkL!B=MHzJ$KcveJ83BjXngFEAVTj+|zWTGC9euF#h8gOOu5$ z!XwNPz1CE{;4yWQ3w5cxj{2+LMSeM$hv@<5@$y5k5>9Zj69#a1r{ns2fA~&_eky z?qp~azDPDd#;Eu3h}9rLh+g#>kh9{Dz18K3qt^X$`D*K8n&SGC|A`z)juI}Rro(Z!_Wb?mRDyTRmqy`_D5H(`cH;17npgm%`CQ<3o` zv&X@90&^_`G&rK3cp9yejpMb7k{COTZ}N*H^xY_=F|0?TPM?rZ$QV}<~f3EviY zqa4D+3qK1mODHP553qz;VB((&VTt|EF1rKI?0SMn@Yu|$fjkqTP5Qt0ufAs=+v3nd z;-{vT*gK}@;GQ1PwGW;V`f}JBkKqYC$Xkcsg_e10z}wN_R-c`g(vX+ 
z4mZ00uXpSwzgimu9<(iK8OK#_KG$40uZbZ@IFCDT)(E$tre(J5aM8t-fUf{YA>2MqwiKUa= zo~sOW^WGq4e_<~dfI_|E&>yQumtvEM+cZyY4 zkhqBn>7zave$&VLJMTyxZua<1#N!8Xxxf(P>L;?d!;V+cuE{xx<(bml3|!z~x8`tb zRQwolL7FN#@Pd=dEAq6`>P7K0C6PDDcbUJy4tu^p8I#+j?}xkD;}=T3sh(#0g1If2 zNc3+0yGTOHY>O4U!AwKw^e@zQnhGUf`B{zySQuHCq;ja72>2%p>7Ru}LS_4JR~M56 zr@5K2;oU?1)&1VKl;De;;UQ=P(OT0}<|t2QrKIs&Y#=3?8&lT;EmU1@J-J%3JH@L!X3CrQ|f%V>!)gb-SR>r6Jd+zv@{Ma!VDfT*85{ew9dwZ?J=V{6I#rP zv#R$r35+Fz!h5GeK4bF8ND7@;&CM-1t2_9*(~EI z$Wt~hql%|+Ep3zQnoGyva%}~QbeyV$(W{MnP3rJ}juA}6VMm@SHHHur3;)OOM)o#> zk4^-bosmRw5>@?eA*?a><<}VaP*PkrV)+3c{mcNYr=+o~4oXBWq1^T_6ad-+n-@yb z7jNmG3?7xKS=eih|4NIYYWtIn3e~AKNeLvHsrfp zX5v&%zZueAV%T>_dh{SA;YLBJk_Yjfn_xz%*~w+oces8~IPTY1>C)`D=nWveKq8 zm8`*?36$#ZJYnhV#hbO=n4|4n3m!+&G3D3$Wi)O(Vg(7dGE?>%UBpGZt|BP0=?qH6 zz_c&^D7@gwb*^u0+UI|&oJWTM8Q#Z#S+A##+vJxm=)??J{}D=HU#=hm;JYxmZ(!YW z*L4%babg8Tv2z4m%j22s_S?uyG7Qxn_*FSH4TgFt0IoiKT2cmn(5u0Vu1K<{AV^k? z>yGa2aRqD6tof?S4xjDo=Fr0}-6K6EG3|HEm6;z2oHy1VB>nqVQ$uQXhZNWf$1fuO z5f*9{!!}%jnA|N#p$U5H!0|LtVG%BWe?&AMw=|8a9(%fysm4Iu3^^F`K*Ay|Ar`;t zg1RHbr}LA}-R!VL*+ZZ3jri!-Vw=Nif>M|~a)A{Yf7dmZyX6g}sMYAx!m+xTsS3wA zpLF|)hM1|8#M?9nIHA-*24aY-0606Gi@@Fvy|5|TuMkMSGpM5s-jsUQuR>e~{%9$Q zA|#!%3wOuPNpW+1aZaMsSzo_|OrahILA`iLO55?cqv3-nyisSS_Vbp6B^^D+1tz@- zcJF6O5m&lIQE^>&(rK4dDt@1XcpV5Ln}r1mpf?}+!lLjq|D(TQs1KcX#&Eqctc1ko zj9BHd(d6$LVDm*i6O=ibN~^Njq>-`NG>jIuzCKgEgM*`)Y*AUhio{#^QEce1>f9r zH1O@Yh4zqMdz4TYO3sJ5z;z%}z%DCxOjD6Ns4aQRYh>WxeCW<=m+UkgG3`elV7k0& zQZ=hqH?hy=LX+CXp#KbZc+w)Kw^?002~gKlx2hAGRX0U*)G47*RJ}_jE{23n`c(jf zlm|mNs(=$<`?CG%FTbcva(#Q)P*_relCYsP5ZYiJOWdib2;f((AofXOi_`5|i9#Ax zG^VB91z`cl6WxBJ5u^EVRKZGXn{pXB=B;ae$^bY zXIRDCw4eK}1c{XCB7-i15}O%N!?v28C@O~Lmlc^B|2sPcy$&3`reqerMOM;<0az8= zat+Be2@l0FDJG5+^(e&BY$8>F;k#!Dy-`(C@6ZsCM{~NA?M@Nl_ylGB=OyGxgn`eE zP<>3mE5b3k3v-=1U({$MmV|R8I*jQstAi5H-!7M;iLdqFj<0MzAJbW2{tw8@=jF|T zae3F0covKGvElrMKfaES5YF=n_IR(`6yRtVsyV`V<6mnMj(LTQnX@cnH z7U|sZ8|-Fop=ThWYx`;(aoJI#{6;tjLOgg*EM?d>fDJE>A)%;!5?3xWW2H%l$QxD6^06+aFhV$4$ZTYi7cPUl^2-yHy3%hFo#F^CNd0 
zWeQCnzrO?gbQ&4IeES8N&OtZ`W|BN^NE9TE+#$z zH;FHJq$(cEx_~*|fsu9Z`R|jBB*)-P2b4Bwyk;mA{5NpN7Aq3gV(22${4BuPK>T3* z<*GzbPsNf*Epl@^-5|6CHurhJG$UJVp%jRv8yUt#3lW1GWE3u3e37V7NPlm_Pnsjk z7TI;qDe$X?Ju3q}Jigdr7-b6>rE-Tvt>EauwE^a^tV517U+hH7BA>kJoXDycO%21) zhEfmUBnAJ6V|Cq@^$+)*N&TANIcw;i@N%AZ;f>zs8g5;%EO zRw!SIL;~S=(tGZb=6CS^H{m?#y;c?Sy#a~QbK9{Uv@3MOC5W+8qmcC@&K*c1CQQ@v zORZLs)8V6WNH_~etaa<=)I-jZ?)RM6IhF*%OGxd$D-bbn0m97v#~}zeZ&>~@DWhRh ziBZ5CSP^wc$1vfO*B);uuW{5q5OXs!WhyQiDUUvNCJ+#f8tXFVXn2_}gxv?jW*5-P zY_-j%g#(>C<&*7Y`Q9VS3Y7B5NsJejf41n&OY+^dDX4ZSZyEp10JX*T@B=rUC8#pM zB#6y%cB1cChVFw=r`0kD?Sl31OiVeK4G1q_;JMu|3Q4Gf zeF44K6%_X&B`U1S5$|H}?BOBQV*j{68ipsB6A2hf)2a`1oc}usSg>?Uo{66Y)|m*O z;2KTDyM9$kmP3?p{qQs|=FP#vvAWA!ZyeTj!TPP^#lXx7sVwTsiQ{&d4dh@!V=*bm z)pZLyNwL43oN06@Fz5DEJ&|QcFZlAj2O{8%G!2e1*=V(WS7gk_4PW`Z<@&0wM3G2P zjz2%RR0ZB@$2ns9spa=oK+rcIrulmq;xWZd5(_*85wYl6qDp&E^(^0`F?eU4R=Xa% zx8RE6PZ-kW5oXMPb2{@CD#f<0nwJK#ga9&5u+>c*we`JHHVkQx?}2pR+ufEZz$#?w zaOf}!TV>>h|Jd{Lo`q}`&O$DC$-+=UOag!`b)SC7sdB5rOD+?Jh;~Cq$6bX{O z0vJxR>oklpPbwIG5KpR|q@t-7YmD-sep-SBvU1&O81>#SwMRL1>9d6hZfwOee!J2mRu)*v?gp!K=e!q@6YU>blMQ_5pD<*h=X!oWCTpwrAQc)(9xtm}CAOvIM^r9;6Tjf$4f7pi=ChLaYot{-@8Fq9v3N zLW?wYD#3(T_&R5V?WT%}i6Rj)r%q)wG|FA>j z*_TJObQ~NjD(i7Gj8@3mCCkjuu#OVaT5+qgUXPp3urWlZOvXbmQs`RyQr5^xOqByh z>BeVLNP|@@D7wCPcX^i){-WXMJ)sDmt+#ebFdeZd+p^*BUROf7V`8Bq{Jt%yGZ7X$ zBZr_c*U!GdNCT++YWVkM@~ z+yc>K6hI!@bl}}S1t-Iq+C+(146^J1e#0xi2J*>;uf&c9Q0eRId|8a9!oA=BUc8^1 z$T3*fBCsr)2Mf}@zySVK`C#TjajM|RK+@GSz0$^$`Q-WXQ9ZO+vj4CI_#zzX&3Bm5 z7yG=BY$1U{C9pz;LUR4fV;uLB65awO!LIY8u=q${sTN<|mJaSOlJm1QS9iG_(dXm* z@Fm*s(jKLR5WTdSV|6c@q4pW##4JJ*U^AVK-X4u$n*8JV;$$WbnS9FrH$}>nUZ5A* z&r9Fz2mO|~Pn=U~Q>I_s#HTtniEW7i#=VXCsR~vMyVgtP-;9us7NK~qNXL;`UoXLh zIxj7|XtQ#Qbxe;{^h}bznnDJgrP@H%>+>XjR{FA9PGIMwqz)yFT3DM|b-E;_^TaON zB$F4Ba{Mx6FLG`^D1oLmY-Ya+L$LD9h%Ju0U4LtSV~?f?$Qm`^veD8Ouxd>j)8h;; zX#SF+pP(?EkQ+p2>qWR2UflGzuFZlc)LslW<9kAZ9Nn9X{ej_AnT0+foT!`)F1@#Rd*lTIcN;40vBp-oyk{a)OQ?L% zWd76II>iB}TQ0<=!RR8K*+Zx9I_#{$etTo!@+yI|fPG1XcS(RfT&Z3agv8ec3JN%y 
zZ90)(%^FfQ=y|-@qp;n8F^a1wD2OP3Pamea`MjRRL#7k>brKS~DS~)1xt%;vP!#>u z-i34sexPl31|`y1MEz&4jas=?a~=y#ZwAgHh)w`B!hgdL#Eu#dN=TgsQy&6IN8n`H zp64T@S;c~$+aD19iUbH&OpYVQRy7sPh5jTc-WT#oRC>CJbsV*0-ir)^fG7G741tkd?FaM!85-4O zz&~do+Zu)}T~)-Q$Rh>pT&fH}@^_Pi3@+PsLka)UQUa8{tt@b((k4Tp1Ccn72_IWe z_fUK-^!hPBAN>S{H@`)wlKw>bS3ZSy&_<#VF;X^IabHY-$Mxl++y-uzhLZ0)~T*1YlWHT{Y+j1z`tpR$jUcHxlOgITPhW+g-vl$ZKf-_l-ED z_P6P)S1q@mLP0U4r_&ueH+sLfIdL2BEK@=qd@j+>TJ@KIiCWER8;*b$mi!TiZ7nw4M zFoRb?B^E?1GsxlHs#*rc1e?0W#vutoC&L`Y6NV1~6ndFKVS1pSV+v8Dvz6Orqs4e9 z6?+m#D*1gph&;#y(_I__T%z}PfiTSuKBXuoFiEk0+f^V zfcw5i%@o!+3}6^+5h)@lyMeS5wm3?}+C88o{0KCo_buDUV1>Ky{{4_x4M{mE1Q^3M zePTCZrkoUgC(21XRmR4pw!(laU%bn>-}58>vdWV;u((X1kG;x6@I&FjgZfbEHT%4v zaX~i`DD5B@EZ5#anH7TSQcmona7b8S!--_j;9pgeAzg&Ub(E8<;-Z_U34_J;ASA<- z82erDGY~q5c}@?Q$B07^ah436Aq1VZmT_P+AxsCKFhk(}`EvFMnk6Tp22&w%F`X@xxaE z>~D~|%3wt<4VhSl*UX@`dL~;O$f%!ao0SxI-UN#k?N%4+hX)+F4{K>|p4Bnb6CHi~ zOEIAXU+LPu$9Nc^hns*#KW7IQt#g|p?{T29V9AqoWI`1QQb7n{T|xrydbJx${@yGb z+y$BoBqFe1s&ySk!8DuvuiV$sa8qWb=@6w|8E7Ygkbrm*y%b{UYbdi3$V#>QK-AVR z%gtQsQ39-3INMRB&Q^IYl#`ACfpteb+P`uLfNUWIw%+R%WPLO)J9T=@>y{x+gf){W z`rXkIs@7Ls;rDL$9XV%9P#ng_a!|orCauw!_y-~Ypm?X6p?xEuWF7;a5J?r|A2A-- z0uI?qzKI?H+0PU0|vr`(VP+$KY@z)Py;h?15mla#1;;UW^^M&+nl zl~JJ|h(RhqL48A`W#q*>yVEZ|QPF%~bO1Npwel%bID-|fse`eVeY~WOO!hY9ZI)2} z^};2(6V|rM0978ylGwq0L@hx9CkJuTTxpcukk5;?1B&#Bpv=BbL&(uj4YYS2Xho3^ zrFd0(e}8u!PLF-VM=}&%{QH*FJ8F5QsrqS@kt@--$hi9=2PXl8ejouD9ucES0_FyN z7cnn18T)XyMpRbc>qQ1+md`fcm6LT3`2#QWcK(l<0w*b1$z5aBE);2AH6HEdNZ%vq z=LA4urIVz!p&1jks*b(JkFcwOQ>2(JU)DEMrSDEk^@KypKY@pmy5rKpivSELIv3{Y zk0m6=egtLsSH?-cn+PeeLOR<*%w6)Z(cLfidOcC(Jv(m$2LQ@kPIRio^9g@nz&RGU z59$3k7u2Mlo8zKZhYx5$u?0Wj8`+V%-TLWW6#-+%e|lo_`XjqGMfGd(RBirBVF6ZQ z9FO56*OSBDWo4?i>_x@YASz@X*Vl3x+M(A_Ua+<&8yL2&*mbg3P26F zQy0p*l9j$f^Th`E!*N#Xl{^#oNJpvh5xma9%S->`w*1kW91pODEmdpG;e>FW=TNno z(N)kq?C&h({FxZ2#j%2vC^rZ|ZtoIlKbuLC+d7e*ez3_JD`KWUu%V>dL_aKG^WL$2 z=!2L{avFj$9-c?#nb=9_PtZvd0Ld5BS#=FeXS@T}D{tGGvLk}hd4$mxJ+@H#WS*w$ 
zI<_B0{j5V(Sg;Mz#a`eKj9&641k~e1ilXrDPBrap>Thw3Q$gk z%I{1)&_BR1G!#J_jjm>y4N7^>47gZi%keu@E7&;rUF)bDAO`Ud9GIkb_#J?N1(xjN z&k;)T3yTnB@j_JReXnQVV-8}Jn&f*|jeNh6AgH5>1jWKb^DRr0dY$U+hJ1YMK{4uP zI#`B5cP0a8K`r)w`m)$`Fr>00 zq<^mTd-qmlbBGd7k5Y&j$1-Tpq02Th-v^||LzE}Vfy|65)V z$cLT3E6qcYSL}CLoHEec&eR1T$IO^=LQsS?o9XDT^32^uJn1<9v4+M8B@?oD$W zpgiu6wBF;}OA>LR0LCIQ;ltX11erS!xfxI5t&os+nd#jW@_8gNPCQlSniNFZkO9VQ z7Q_y37Os+})g+act|mC*1ik6=z9z!{D_t;j2h#J4KU%={^ywfRsFbREA8oBVDB(R> ziF4ui11zT88?KP0zOs>z$|C*#=Ifc89;+P4Igxk9o={(WUX^E6jrV`3dzUe z+?n3|I8h85dgFJ+136UCFxUV?l4n&9^BIQJ+ z6_CWQFT9I}c4v92sBmZ(TR%sV*!iSz;`OSgf$ypLz*QlAUHomVUb;tLX08!VGake7*vR`RDdG<;ji{FFWr7k#|W(F zQpuWc>#cXBEJ8R)Kba4Wj7Z;s4QZWN?6Jz-ZwnRWhMY!?k9>2S)~9*~!mjiLT#n=r zd_-%E8L-A8v>2>$sv=0GAK}JEl53N;OlUA28&|0*R~#Tt5=@I$!jYjta(Z`$-f(|- z+Hee3o(tuBVs*{$7HxBIbb@FZJ_IR-g$1#GgaL$+#@>V-bg>ZzI~r%3Y9BH_@ou45 z$nwZ4uYxX|N-&IL;@z%{G+z&Swy2FcndGOZVTgNbMD!6KvOXXFc&3OEqno{zNt~JH zASR+Ajdv?xh?=aIDzo^lJOQwB#*Bs3-e%_``su#&9?6dHms&?RrUD8*BrT z1TI2@luKAjN$QPXNKi4-poblTR7ALy&uM8=$0qN0lU43+7L1q~T|QMWUQL9a%O(U5 z`n_l~53Ham*oc{ucekbtPp$V~Ok%qebBFBf`zAfVh)g(LXyY7l(*VtO$~XS za=^#NE^N~I#6wgdvC66RjmCm5`teY-T_)~V>s%D5{|1DAAkotlH+Ny!G%@yyq?S_~*eY_UfcV}XmF zR{^qp<5CRO|BL9717EPDdwVnodO?k;elry;cNsYfD)vW6=F*a26f+1Ja?VCN{Qiyg z+u9(@CnZ8*1xBwIj>HBVx6c(6%ZsxL>f>mMGA+DILC2NL!IsH}0Eu80gkkHqAsNl; zVjUNlSAkH{s@6{019?|Oy+0dvC@00ADkJQ4VcoGL!Whe!m|{F;(HD_X?}g04_6ARY5rn0 zr=C$}2ab;vsjWs##)*5qvXS{YU-D$8sB9*!j3hcAP9>j^PT_HV?Or3PR9rAEU8y85 zl#u$9x1M6{r|N<N|}q#4r|8Eev)sN6_-Gq5G&Y65}@RzaM-d|zVkhcLx>Srp}0T~f)I z5|r2yyLS%1)gj_5jid2hPT{;ugA*jH{4QXo2&i!rXFx%jLS15FfajZE$Z4S&9WB^Q ziMoI(Yg4)y2CP>eG7(J`pDCy9w-YK{SF}Ip!#&R_d*saXvtVpyuWmd)W{7NIh`OM| z!$I6WGlrlCt9((wAv-%*P0f8+tAL)1;37WZ@!um%1R$!sA@N;QKzqKIknszu8|GY3 z`?OSH@$h0U@N<8*#AeVj3{>wxR+;xg{jUsYv;NLBty?S*M*%6Jzod^g8U`sA40h^^ zLyoli{pZlp4eHu25OoYTE}&lC;vXg2KqpQ9!+eM52G-V?Qz=zR;$@a8%gW$LD=Yld zw;~ICr3shEgp)t>iXD(~E;j-cN${$pa1-uh&von$1F1FMvu)Q!v%)+mS8s{7KiH5H}cV(pOv`2L~qI@EN&I-a7HWqO4u2+ 
zb1IAwo^Dn1N)3IMHFkl|9`i4jrD0sbmlvj)64w4MG+ff=yv;K>>paOMgxE6R{2UNT z>jp|AP9Y)@6+%J#p@Gd5(Nbcdfpb&@!z5pk%tfj4>i&~PbyQP3`yj&WALj}Nx_Hn7 z`g&nqj%i?ePb87*W{i&|be=eVnh|I8IAI)wlEzs1VDSpEB(0|#u2-}Vf&y$R8owED zCuW!VjM}I1M?j=jwjiWKu!?uFF)`(aDWK>$vOCF?Yim3gdQnFYl<5}}+uyOOn z3I63Vic@V!VU6k4){?%9vF4Lb{Vx@ezWoC~K-?E65O#VdRfrTg`eOftM%>>8)uXM2Fg~#>=0ZWVzk|(l!}*5y4^afd7{FMX^9x1sr5o_mt2F3yAYbfLk+)Psi9U)j zx6pauEYH|?)(Bur3*T* z43T-XZtqkKymwaVFN6tz^by^d8+mN714^f{b1Mx2FWS(TKFeH6`i8hi09!%1sCVKq z=^w&&&c9k~LxU8}fINUj*#H3IL_{pmc>E>!lG8A!3X6`B@`KoXRpdI|SanTY&DSAQ zI5?XX^L!f^34`uxlR8ZVK;Qjm%teq@`QL7NWOSZxc~vv_7Z00ej!$BI2Hg)Q-2ml&o7TT9M?p+P05@!zkT%4Adu!fY z?pjNW@TBBR%cCfcrBB*y-yB=He>`?TXgn2$y<~LspKQ+-! z*kdpTnlPwsfykqGuGYvQ*J6Vc+By(&7BV{1F62EZHsYq$u;k#qd(Y`N_3T$pyj04} zqOv+;xQ6^BhmUnrh@ucUooeo9XjQ8*Vb-QpW?My@!AeveBFNPU<9~w3E7~Ut9`Kh8 ze)lCZq@=K|HMuI<*Ya_NS+|?65W#jsQ-65h?$xK`s01=7Ut|8VRdemCXGPu5rqoBizy|b^~QhoHUzMxPYGpeDE zP8lw*WmNx-T)@-B7#g#uO=aPBP6g8oA4W01X7P<}#IM{gfH=u=z8P6h{ALIWip*;KF50(>)#2rIQ6ZcOF*a)+5e-rHNWoOQo`RW3u|hM(nag4hw%Qn`R7 zEgvOxodh3FmdzO7?f~Q?J3HQimEo&4ANnNfy}38G)mBNU9g>`T5N!YUdIvV#1S5k6AE^`398p|PQR-$h$(3>6njEh#pCKOw0c}I=-it23kq54h-C*FaE`#_JQ#8~AW%Kno@ zUyICP3WvkXdVKql^s(|ei{1)>?$3xk@bLMgAq4O4sD#?UQwZ~o23+e9NC6N)jkOTM zjeztKGg5XWdaKl=T4C{~=iD75PGuDG@>4F9cFLz-sSXlt)LXyV@p#mfOJGhBh<06T zafnPwz&zC0Ecm3{HM35FQ=0Y%lmHWFEFy)_rKrwc4S}wJ12qrzPlzTq0Mu*$S3`>b z^AjM7ZU1Y>^-g7K%2@NUpC)I@5bge0dn9l9JOqlhc45;#w~q;6f*@~k9}IGwK7n`$qJRpxN#5NF7Jg<>)v*M}BNYd;g0_b!%WXGW#r~&` zmLtg4d-Znm>&2^WGJ5i+(2dOE4_%8zjWVw7FJ-ns++og9Ox#}8RZDbiCgCo>)^-UZ zMDd!uuN`BjLrN#f0U=~ac>kZ=D`00oT(NFUMWxd6F}*BzAm`Qgqzfrnp`$Bn=HU$= zk4>^rEV0q&KZg#dZj?#RyMEvKmVn~goq%)eww#qPvk6LQ0gi)+*_?6Hi1@ewX~6nP zS$02Zb@6cVp@JSiTZX@_ojo)A=+#x+xT2wFcB=38w^xT#(vyl$#0aO|aW3);UO|%S zvoup2grA7~h4mmNm`6edY4ADg+s$0~1UP@enDvF)bWi;YrT)6sADt@;Zt%Lxv0zTCWmnV+c7r&%OL=KW?VpC8uZSQ%+}k_9I2u!QeoJEMNpsn(%f2 zX=U!NT46$*q_fg_7gnjAqrQ%@Bj)re?l?rje!mdz%D@G!tbqNYN3kOcy@6@TmIwav zL*n?J#yQ4GP+|MOrnd;^Q&l0MeLv=?D0~v$?AfiTUh|2IevlbXzXG;c+IQrl7Mcd` 
z%uraSM=!dE17xom(x8iQ#sD@I1vm}hP0xUo=QKo8Q5a=xc=ydqsiKDCtGX280V^V5Y_4KPq9{&~y$FZDz+OO!;+XX_=KoCQ7`?cdJ6xP(J@<1CK_Da(dS z#dKdlIMkR-hqFaXms9AT(~NII9GeSM=2O*Yvk~nZHh+bm=jWcD3Vf$hh5dnc|CueB zsL?nBe=rD&rJ~XEXA#0!Rr9p9apuzC1IZd)rz!i?4_^ZeufPc8B!fQIc&vPjBP*iy9N`O zjt~YLN(%6+A7zLHcsR6L;}GHgiG2U-L}29r_Ak16sT}R~4L<~RzD`qn96gzmgQj$~k!v$2@SzcfzgmmYjq`bmm3Y|a%V)B;tqIBW zgV?=7j@kk=aW*tQ#ZP>c;w=AnCVYie@b~2Z!!V_^z5X%_DKmB4qQVrYrTN%(TH}6e zBQz|?TFSrTtHk|B9LB!ZbnICIq?&$!m_BG4prkNPSY;Wb8#{5Vl}xw(Oo3!xQ0GfH8@G{+gz85I)`*IM*Tk16hEy&)S7IJddGQs#5^Tfr+CVQ$~dBd_2|UmA8ffD?=?`h?tKBh#DwsRsNsIZ>rq=0A?z}} zYL1~4XFNO_K=_Oq=OA}RTA6?oI{Sxs{AS#5!QM5isZ<+Ei7F$m|L!*TQJq1C#MnYq z$GktW?l)RqyCb{knk?(^|<%d*VDyd_M%S)8xn1L^HTt;GpwFmov3s6!8{x-^u0DDoW$5WkM>N#q6 zpyuCBKOl45-~hO|3=b%4l4s;08w?6%H2V3@Z36{?f|o3X8Vrkgw};T14(9ZJbd!ucbJeOCd@s1fRu%&q{XiTU~G9 z&9o2BZns@d@nqmeE>KbOHiN;7iTvWv)-R{ZHjsM zS$xAr#d>~e~)(&*)WzMpm{uo z`gquk!A12$u$yk==&ar!2C4C|ziMNIW=>L{_sy%H|Ebv)lMIrafor=dvjsK$yX%dh zs$)SZ6Z}UjJ>oh~B2Zx0{}fLO*mtz};wt>Xi^vOX9c*TZ&~4epTzTpU%(;UCox z<{!060coTPX#DPDFt}pavL!<(WgqWEVFTk?AE~C}Ken%3SC`r=nV`1j8TbIC9gf}H ztcZ=a_4<(B)*Xk}Gn{KPb7a{$7Xz*z{~vbopgbSRlxuqJc}eBuRu}>n?cIuqzZ3qn zz$J6yy*KZos-k?^L;pjH9-{}r9#R%8oQ(cbNVq|eWl|<|I?G@b+kd!XdDa{CZEBb3 zF?P!0dA(@%Z360mR0@ zr2z~marf-tyiNvN^h$RQKS_zz7!<)tqo6xbR_z&a>0OK)D4r8pxCey}6|(Yd83sWK zlF5PWlWgf+`wKtttRaiq1`4aDPZOGo6_ol$oE}G^CrB3&4X@Tc1%%X<-^f^(+k7Co z>as*jWVCYJ6e*02BO9Sxgi|dBh5PHtSPib!O;F)`mCJ~%;`D>*^k551 z8}9Q_epg_o8)|lhfzX|4`+a$jE=+AGb%Op;3xa0}TWU%82`!-ZpV7h4Vfg^-b}XUG zLW0F5IMGLw8zRd5u!Q6hN?G*>%8KLRZQDRbKBT6-I%ov1|Km2?0BQBsv0OZY!1V7ncxw)>r<{WJ7-kc2t~cycmyn{w)&e5g{|)Sn zVDQMehdX(I+AOp^JM6Q{e1ki_yy=05ga%Tj$$f=s=PNu+*vy>)PJJl;dIo~7gLk9V z62j;_IyQmtJwh3m^-DZqSJ;?7d3d-H>vQOGF#eADX)+}$V>C4ZwZ*^?!L_iM*bcXq zXI3{EYv*W*3*Y@=fS&9hpfUpL-W7Z-!~@1~r)iBoLnnygO|uco2MS4f1c|uj$)>!` z+c|R1eBt z_C;sch}!U4BKNTbq(>`v0#0_?eh3q1zM%-m&SbLEeovRmtJEVt2lB8ao{hM!^Bp_5 z$(O0~g7hhkLIWWv?Mc1Mf8Zlm+l`j4h=Zg217k9;o1B6isRmnbJS}zaU?n13A|z(W 
z!x6s=<~~eleI2A@30KWKyJfVP;Gb-LWedY~i%<+H=S)*7e32)$9&O7eaH?hMz8PwL zGqrrZLL$q3`&_2S^b^N4!BpV>D=OO&F?H)Y74d_1^c&dT>KWL=6+qKh2K&=$aPIfc z+cpK*GhZN4D#CZhKATP-Be(;niH3eTuR+NKHkekdAnT4+r&rY$;W(l49ET#(@ zcY8SYd`w)eGUSKros4ZBidhK?<5kU_g)!vx!PJ=w9Soo%vE{^0H`OdLOtme7h%K>|Q>sqC(f7UutO&g$hYj>g${}V6?Vwbpz{6Q$ zQJl2C>6S&;Mso}#tWGL4{am$`Z`o)*&IMqu4~!UbGG7;`hb_>1xq-zKSijG;y|Wp@ zTj8=VAj53)x#nfDr$EnZr&J}myP+c0iPPd(@H1S>Xn3^#Y_CYH4hi3rl0_b)XHy%= z2ii9M96#L+FpAru{e6NQZ)his@}Q~uB#+;V<4TQs6d|gS@)_bd&2xsWmn5Ajx}1K_7j;jsPPINJzmYHBrM{EZ~HZ{ zxjDnsQb|!c6{stF06tL_N_$7 z2%$Za^HoQQ83GITYj5>o-gnIW1%*-xWVHK)PofK^&<9S1v=7~;PI`Jy!9Gs^Q!7&HCL3y4rN!&dhS~#+BB?jsg3lUt+ zT5#NYz7Yd=EU)J{MvILSh$CrM_U>5r+u)aLoR4l+m5cE+k~r+P#xgOCy(ax_BBz4>4zzO1;V#S8NIaZ zqo30Y{wP;F4q3(sQ}UY521faK+(IS`8^j3&9|_?z(%g7N|2jf3nwDx-Fy*UHvww9- z8DW<}3EQb&-3gQTAG6y~E3O`R$nMV$QPx}d%~t=KDku?cv-&kPPvy;zWPp+RrF!Ai z*+vh&6j^LF6hd!r29n(v)xAg5Xv^C=5KGyYY_@g5&548z5bDn{bZGuXE z3D4SqJ#d#mncsZwu^Be(mtBXqZ&+OE={6eahm7ex=W{r;A)l=qZ=SGmm#}KjwBhX2 zgt**A4^a7H;_`~y{62iOmfh#Y6qit!54>YoljpyAHf*AQF-s#yoiX+5SO55&8^4P> zro%cHF(yyLmUJn&Q&UTxua!wEEFJbqnylHwT$2)~1lX5PM&t>$aACyssV2@;RX;F-7f0r+omMj_<8YE53)G=PKVWSEWkuL zbKl}{zP6CVj$}YjC6`h9-TBt_S58oo2#2u=R)p$D4^z!z$vjGoK0!5|<7!3@Td|4E z_!k8|^m-X>UumGZ6{(~tN1pGCjP^0eXnuMAGxZVYaF)}@vtcmV&Zx}k;HzzU3>P?r zhLXpG4l36f&A^IVM*^XD;vehpLP_S9q!aG#E(zrZ8YE7OO0%*Lb;T&M7W*q1=Cwpl zFU9F%MrzXVj>!qfI{W5#vQ9+kz7m(_VPs*e_G=0mM9(9{EappZ3ykc>B)$KZw=WIqM=pgSq&cc|6swqjI7X4!=r!$+FMDcgv+6gSeOKHqMf$sU0)1z}Ii(8{>nW+}{yE`dj03qe>MaFOA@I(AIdG zpt`fCyVbJxA*arOP0N--D#kYXu(uz*_`!j80>4yXzBFXv>SEz!cT3$#pCASQTh3z} zxWJ)1P9#9n>>02g%qk!p=UIQYWEJ2m6@ z;rYVY{xfKoY*oLgwQp0D&#+UmQEYUOhfuwHRE;pahIF_rUayfYwFT5L_Qo;1x7nN~ z{v5LF7V}CymbU5l^Fkl7tV5ngisfg4-;j7cs#yEG^)fEPw)}yM-l7s#M$|*fLUhL~ z@_mgx%SQN+a2MgoT#51+*A)KDyaGE7IlJ5~|&yQpB zUdoLrx|6Fm!2!LAx=oR6E-0;Erw2R1zd!}?$d-AVr|T@ivPg;=lr7II;8|&Fx$FIT zw4fuRC4FcdMJ_kkd;IoTYM$%XX}WPJozk1j!nq5+`mlpTFDXY>Mqnzr0;k)Fy17E@ zP|A|@#lfEVQGNj(;^fB~=5KQyyUfQgN!z>-o?vRFX(D{Jh25H@8YFT4+U&ODPCEn+ 
zp!)HECcoqaG_fql9h2%L3PIDupzm%LVV{3+A}{Qt195}QJe3{~WJ1$M_?36A{7sq? za~eEE=iK8x(29C5#)UhOj zMREMe#lxUhdz6xvTz_DWPYcE~^Ie(OwuFd0MQu;I!Y;K0 zad;VBb|Q*`Oek(JWIgxz8sQXgiabmZoz{^X((AVSa7)8F1r}-$cbN!+ER`++gA2(4 zEWb$)pIQ^vbqIS;9KeYE{Ep!|#P}#qq?T51! zOA1{}iWLh%>di{jr+!N-;kD(0mZd*e(^$&Tc86d5#m0SVH9%aI!e#|AB+OYJNtB%S!O!^kGDynUYy|+|&ChBa|A* zY|!}g%biBXiwl?a>~yG0r;!#su1<}Uq)1T>t>XhME!~6x=A)rkZ#9Yr} zU|}?_w@ED_rFI*@6B+cknjx}74-;)Cx&bxcR!LLTF_~jY#KKw99bF~GoXL2$Fmr_s z{3c8Fl1|oD{Wb2Sm>H#ANlXRLkfR)nm5VRg% zBH8lR{ifuB;DPCZ2Kjmk{Ya5^x2lg2#EK^V-8PiyrnVloKS$R26pLnxY7z0UXyR4P zGeOTW6I=h*xCQa$J?ASt_s@GkF&~!{HB~lxangS0z-95QENs*S3f4zLsw~qVEfUiU z6jHyF5G!0XQurxIzti>JVurD0Qsp&hNTMpiD=t9`ptgB3e~v}}DaXkBJ28hmVKHXZ zi>G(eo$Huvr23;c<%<;cmparWw&5>rq^`jt(*YijD3>`b-^0jc3)D^|XHd|teSi4) zxGWo7dt$En;#ZTzqPR?lGL|!6_h3~s#NORqt>6b)5_Pv^^&GS$pt$s6lleYtN!^WH z;%4`2G%QF8ut9syh8{PHY@jfGF0q>ql`fF`NSGAw8c+BL1dcnHJTY&$#+m8BvNHgU zhY09Ah0a6xGKHIhBXFJcBS3*YYZ8$ykp*Zc8wl6?Hzg7>v&ENoBRy~K?Pn9^!*G!h zGJtg9hD4+%SW4o2ROf+1d5{5Ev#i{m>?6rTLi?rK+A!8}Qh={a6=i@~<52AQ4jD$= z>?M>evkS2GdM2lJo+VZygfco)(kP5g9KZkQ^!TSDKi^{jviE25Ls}Tw0DThlZtlcz zV>$>uzt4FQ=&vUu;?JPxkeRWRE`xedL`^-Az3+H`cki@^-;(f^YxxR=mE?&?OKE!W z9iV=Cyd01bw=H+AE^Iuaj-4W@6CW?|ZkSYO@Vm;fe>g9z{vcRab6@kNb1(CFo{#BR zrhG4?XZ>Q67vtn6jyF!C|5?*#rxR+vB*}>y$ASY7*>#MN$0=TwheVv8-3M{Ra(HZ|~d7>ldM{Z*79J zp-A3cA*3JvELM@WH3nRx!{+3#Q@Xpq{ct|ByQRLP_D)!i9|fifdJm`buAgQKTDX~y zFKOK@GA9T;pq*!DV`{qOdFhEAwMt=CO7G8msG#=H1^-ast;XC3`^ta$52yXDoDmP~VQP0M5N*OO+e zR}wzANekw}8%euiq;@T$B7cP6Qw;I)8?+B}}c^P2o z*TQX_^3j$Ru~zGrzxB}BThbQvYKT2nP;^%+a-1+A@4sFEE=VTO>*Btp@gxJ&G}l|g zB$L}PYq~jvQxbd(Z!pz)76*>S2DU19Zj7&2WS~wtK~8@HK2?8|^m9;Ze}oF-y>Kk& z^BYbQ4!?gUm~*x7G+$BFebj1k!|p}&R*4XH^n&seY^Hy_BCEF@3X)S;KVHG>#h7v? 
zl9l`&ZSDAbS~~k61}2!dW+zuihA83+l?*E`k$%leP#L0cv*=P2#qmm;6Yj<1b8qSh(^zccw_Dt1?LYG(ss?7 zo>6^N+J_%?<)*W2f}eRYg(10WxohrU{T6|82NxUe)g6wK`{U7tV(574V!)RtS`eDe zxAJb(y+@38Wk5<&AF z3|&|1XUk}?==C=wRbMMY=Y;HmZr_uU4{b1P5OyVNvw%v5sEGsk&M^sI7KsLr7k=xl zDu22u-#5`Bp&--MPpPIud0svpHdx!l4YwoByW(M@wJ}F)LDqZHDm5n1zS(^Zu{a4t zTKQ%(p}Jd>zsw4bC51e55WPCio=$W;thlX3j63mic%qM2ar<55gQmY^9l6AO+)vGR zS+;0tI3$uibtp{3Ng&G)WrlL|K~in2D4yp6ACo7vv_p?V=({u$sN|WDz@Thi)?n%n zja@BPZ@GdQYu=WpYq_Cq{f=Z1NpZ_z!^Vs{aZ>_iJW0j?Ik3KN$;K2)j&b^k?bXx- z-P|LQ^$`M^sD{`fdy=o^M-JJUZ0Vm3e6ETsyl;uo_-dMvCo!c{VUfIe3QBK>@ohWS~?CJ^TxyU_0oD{PI14R#mO|R0^U6f33{p)Hno4 z&WVeX@-LHQ>Y}tF{?Wv#U!es#@fjGVJJx>CzHA3&NMSkp90iU)7Emw->S5CV0Em(q zIk-=3DE7Ew_}MmY!vzF?tq2&N4UGJX{drQ_=E}eU=f(>aB6@*PsmvP|_l##tz_I&N z_H|SD{L7BLau72r{qwEdSo0y#LV}#yr)Lw)Ide{#v5+G(c{y{GKO8;lBmKLjVjB-% zf6dW!tZrA!{4!RMKM}TY#T2Jox7W-W@Z6NxsF!@IgXvg2%#3Q`X|^^)`0|*qs3Wx-w=!CtG>3nSSJi6bX_Zl7Zs#6G5wm(9kbF72ib5DOOc8W402Q^zY z8ZL7^iqCi;zGLFc2(cRjfjI~YR5QrP7+xt3vms3GM;**=d@)^$BDaBTgkUh=Kul&| zsVIA4sko(VNaA`akIBka$?c^zH0@d%`?b-=F`cEEV9htN-}u^^(jqAh>3jwK6GB zw8pEr$py6k!$ZT4s@=e$CA|ODmH6(gnTGQ#PLbPgy`-65uGg64#lD5;VHcO>$EFF5 zs&Im*7VPCGrx=vMdT_FPK+clGfGZw&8tmx3UGnW86mLP|NEjN|$qJ&mZ;kZz;bb@w zjF5gSE||>T5GNr_#(I@5zI%xkEpySU@nfn|0cb{$xZqNxCa5VL;RH@=i#+NGC5LkXN;5a(NAb1e(iE9mIWfgvan*u?hB8|qA z_$gQUejWeN$gbJ+tdOqye3j_sdatCq^|Ux|p}wL@Kb2S6-N(Nm*smh?GZCQvi*Grk znDx9GtsX00vuptqr{l;>N=qQkl@1E3$Ii&fQkfx&hXSKP=DcPGXEvP`)J3))N(GyH z*VoP@h=-%OGjn==l;pF3nQAM*=)jO`t%gf`C-5KH1)zn2K~{J;=e_py=Mg)zHhSVt zjFE3VvLXOQHwL1luSeEab{wR8BI zj=5^W@zuEbp=Sz2{W7D&W6yZd7YcQe$iS`9EZCxq%oBumjsX;Y8xI;{+5S+SxCv3%ZD3ApkHe8=klG5T4nF=p!=y zWaFN(mf1cvXI+hckzoN%zzAscsglghE|F9qevp!t-?mKBSLMn{@WAFP%+h%b6H=>0JpaLfjBW&b&Au%ZOvJj z1iKfOvsp^c%H1&m6+m6xV!G;~`t~7fPyzP+rI_OC2W8Gohv__2!5!gEn6JFW3$Px} zRZkoFN)9K**k!3Ym6ZbS<2V5MMiTW2-=P`*`8k^UNn+kc=W;{D_N5({|170bWP^HE z`vs9pt9)}N?WubbnNEPaHn95w8VGiZT)O&ju}LdHXE1 zvz%O=Mz8)Ab^uVcGmWKu{gfA+FE54+`G1niM!WaCQcbyS*=J7S&RT*Vbjd330YrV6 
zDV2v!EaB5TpVs~IP~qdeD)#*5qvc=kJz|E9I9CwyJHX>cnKYj_=X^|QV{{q9%g34P za99x)YvMyZ(t#hGl~#vt8pJ}|F}pk`zMrSAdkQCM5Dg(TsgD^XDln+&xDO76DPi*G zG$M_=eZIegJ_&YeYM4XtKPHj=0hVys)sjZAUW(Zv@rT&~#p<2=ZP0bEB}WuRq=>k* z(OiYz$>{!qC+t`1+b?Auev-1r)yRf6rjwAv%W8;^Qk!{YJ?yh=g)B-G5B43re!vhQfIV)OAVNB!s)DZPM@>;PN z>kgOWUlzz9b=Mw#4V}DNNIlM;dmiq$J(?PxTsBpM_Vg3MlnzhS2dlqw@}Xco3g2B? z4<#zRuKVIu*1pqDMm(X}x0Jv4yp-$fbv4UVVkO6!K3kyLNA5HJ&*7&sfVP;akI1tk zzF>aQ;8o*eL9F1_f;zozAOebHL;H))V@^gl;IDt-oVy1Nu__L)!Ry7TC9YKXtzzwb9BU}W;}ninfwNo#i{R)MoJs}0Yk zarO7p!fl|sS3XDTzsh<831ota*hnnW0(|kQJL=kujlD20?2^nA#OeF%W&M2Pormc6aSu(3~(qU!j(Z@ zeS`BRV9)#MOh?iq$liz{2bfyYMNJL%W*vCj@R1@Aq8pbB3#AC$BzGR|tyYKn&%<8# zUydD7G#Z525VuS{TkpWWDua&C0JP#fSZ(1+x*A09SN`tO{@cj2yYAZcv&(~B?}h7W z<-@rC^@G1k2g(X-E%z4sRno{;r#|QBDSXc$-{HNsK0Ry9WYqsPu3Oye zm0_%JNk`5SnvR>WZq@J&N~}TFxho{Pvw{K{_hQj0i}m@j62l15ucRi#U-=Ihch>aq zF=U@^^fpN1MkL^uBXdu$RK!?zxHD?M-u@_wY5$`5B}4GAGb|VQ3CfVc+x-5+#Wm^R ze|{G&qO)VqZf?!cY0xy?|9JRx>CiVd#p;u4Q1Mo)?Q{V3`DLA*087Lo!${Z^?(g3F z{sa3tXU;sc^TjhG((4?w?tYeDewrm66&<8D2lAj*hm(l7E^y_8KL0y#yO7cCxQ=1f zUY@G_^iU$)a*A=icuLFV_t}8^R7mS-jq#f2#9gr^!|4AW5F`rl!@BV>ZGn;d)z?MJ zurn!(AePU})KySzC7_*QM}!am^{;ngv>Rn#6!hKdV}EN{H!hr+nR|^lT=Ey!c->j% z-PL{~G6bJ@JKf1y{myP}y9nzr=n*Y28Dic3>5_h|J!@(Ww49w%7t|y&fu&&BX9VNy zZPOgk9pa06&{@?SnOSbbzI!?;{vFZQkw7t0(G7d{+=aJjc1AgW`7Pb@Hl|F(m*5{32q!d~DhQ zRP+jHoT4Yaf;L;YbVye1Pcn*L0v}_#epL*;H){fGoHmLXW<{XGFSgyt$+>pt_vK+dI#t9N7ssLxvGh<_U>xuQJ=Bp};*@rCLzQTxZnxvQeVhaj#k(EOOJh`YBq)^k9*zI)c}}$RxlyqfV?GV{Z)frrgM(Fv9#j) ztDn$k6l)MMWB-#BNRZ$bv}aJ5&vgyHt4-Eu9D>jT5PKBZfiM7*BVNOoTCladW)Ali zPQ=P-wERU@@cDiDUqniy$M#k33os$gyH4>(Ub2<+MLOj>#7wAdHMMee#c0rR=-irA zs<=oU45S9AfynVS^_q=N4TbRxWfK4za|aj@2>dS;qC^glKd{sI+CH1jGKs9 z3!LNQZ|K)$#yN37%f)uy3E48fVmNx2^&|ATL6V5u@|dn1N!H2OP7gLr*+sr66^F7& zR_m-pN*K*gEK{EY8NHk-ja0LL1N>rRxQ%~JQ_yja(uy^+X3OCqaeU1fz*%L;)Ni2V zj2`S_(T)>HgsMcP>@!hF5v_0_B6(YZ)C(e=6wI`Qm+d3PkMw+4-O}Qo!RC8-|Dt+v zP8K^aIZlX174b>29Nqba`iuy2wNJEmDeA=WN$ZU8N%P}lfB+;am6M7Z&u0gzG`D}( 
zX=zZUht2I2{}-oiBoDtGJJ$)lw=4*BZM_~!g}H~c=7Vl-^s1j>Q`jK<=K;`pN%Q@R zQ)kHgw2Su{TXqZBTIObR05{@^xwDSbyJ)>L%@zzOve)?xPO964aVlmo3~qNhQ>S`9 zrR}*c%Xgut9jl8Ai#>syu8-Qy{x?7nP~Gm*)W(17cG3g;BohnUxS&oxyx(d%T)H^K zouFv7mA66m7kPSbZ($x9HRX9uA-p~Chb|aZUe!&mk_fmP0fVZbrl;tNS3PhmbuXb6 zkM4HubNv#*l-(q;>VJ}Nn-lDr8)29QURBriBc|Ofrz=jr?gWR6opRB6iSx{&0NN|V zu0#pD#PFAKFNHq~N z2$2G>!0BHriuV@>K{xwVU*ds^WeKiD9veIt%h8OdrnKtIt%Y#d&g)2|NM_(gTFn9C zZR=prd&76XuZCNYa6(K?3wU$CJr@^8B&!pbCPESqIXW>mgP-70ZiXEeNS5723GNEX zPSdiI3M_7iEW)9+<#n;Ij60>6Ta(vI=hEG*cl4o1_$?2G|Z+((0Ss)BZGPP&-U2p z*`Ix&kzllEhDH9KH1?kFd4Vb!AeT7!(E+va5ek)xu4(;{vs;`dRBLF2dZ~v=h!6hq zHg)|c48gI2S4iWr!V!8X3eOpD|G1b)41Y-DTkjFqvM`YVjr`SYd2m`{0z-?m6+$kq zdqP)<+q9R7%bz6{#KBfz^>Ap&Z;Yd$Fhvc#p>-tZ7}AG6VI8s#$IKgDjP8!(gtzf8 z5v!c)8gG*bnR42YHZp_K6H>&P&*Bv(t7RlWPdr$m3ozt~0z4&Y{5jbZ#V`P);~>*a>@W`AhqURl0LPeH&|z@}XWP6_(Inb#@$ zFiBn5N1>(l4fcZHYQjO^;)oq7^XdKuA8+IE9W9V8cH!YEl*Nl}SF!j?gd{qc znTcHWli%=l$xk*boJK**3P5KEQ-W2r_?Vd~oiJZ`CA9^LY^y}qYE)LHQJh(68do&e zXZ}iWO;VGPp`vGK8!1;6B0HXLo+3?cw7@a2I8oBExZimah)O=pn+9`9ceH%N&hoBp zR1Issl@v^1dx6smbITXDrWqYz5k6DM>1ugn68qDFW%=4Ms*+v4bF>KNV)<;yUkGt* z6@FV`)2z)qJ^hY~6KJMOXH6}!8X#Jw_Au{m>#k5lTOa z$X&o}j7Mnkw0Z%X2<;zVG#nT-cYg?sRcH@83;GC`Hpt)+QYqHY7%r*2q4-nOqQBhu zHHEv}2RjA^kk)vFCW*!+JHd7-=)JCz$HJcX#f^VEu-Fnva#eU1it`=i52UVl1(fq5 zR5Ve_+s9zNR3~%I@^D}NCWbJG29NIckL>?DSH+Lp?%Dm>3bGUU;o{M1cwI=ob03w< z5qf<4ZiiK9GKSG(b0AIaeyQVdxv|E?GtpxL{w_-7_wGUekY!Eu%UA+W&$9jiMC*!g zKk>^f5I(;DqFe6(V49Kl!fH9q?kX3mSn<~Q7PmS%$tL|Yh^RZ@f_0?NvB(p4MtNFt z&hf|k0>^&3A9O8e(m;l=cnSRe)t<&qLb7{l(*R(`u&jm1GHf)xHptOno4Iwe+ex*U z#goCS`EVSL4K1jOIWDBKe?%Xf@t^T7-ts6By-_eRmM!T}u25j;BoSYr4*cxgE&9df zO4)u34!lIE)xne}kr$&Y15?67V3IOvkE>;AE+~FUeqom$bkw6b?Xr&OtgYJ7qB6>v zT{pETG=t+5jhF2s8(5E-w9E_tvM7}Nir_{b79&202qhibh!)k*Tq;W3HQI@Et-x;x zK(}mvMzv(fI*3-)is)RuYu5`Ye#^pI%B+pFo~aXthQW)&zz@b7R$BJ-VKhNkt?LL} z6=ABF^LS_!NIsZfHbGSv?jR2N@e+QY+u?mL_{5n0wb~}Uk=fSvXIyH))ffMTUah9R 
zP1&V76(>EXhOFhCN2i+Y*p2sBOK9N4>+guP-~wA|>*I_?1POAS6%uc(a&@Bp_1(A6O3mIvkIg^TnFMVIZL7}kxb_K_77ep6@;mz#=z804_D^y+%law*QW29*q zn1byM_nqZzdvzmSg#6sbaoLftM{R!p$8)Q~;#TmH zo10&)gsRi8s9?I%BE+?7Kc;ao%}milyDy!IN5l$qnZ!r~YcUP&$#?41j%6HC?s57J zqUPc$s?ypeaOgow#9&d-EY&klr<3m&dzkK?1-<2T37hr0R@m-hr}(-P8fN^YW0D;t z%DE8}#AwRZXQamW)KX`ZSFKyw@2m@DE?Lhi>$;mUxDV?GApx1Aik>`4L@T;mNOaMp zsb!s)#Syp%Cz76jXQ)J)sp3v{C6At)W^YpEeE_lI!H_d$oi6eca9T>pKttjbGjnTG z@5cb=HIB??82q!PXdiul9KolYEH*DTmcN0>*P`2Rx_$lH)`#Vp!>w=Kf=NawN~pWw zSkujIo1Deri&au>kad}-ktF#G?%;7~gu!y*ZI!srEQY#|WNT$1_INR&jR#vAmeiT?5LCUjQ z7GbX|p1ZxE3hr!J;Z~{>m0W6rLlDLzha@q)qNgktSCLz_1!QGcUGct{AOd&)#%pBr zzcZbB?SybX|6DOm1pxu%c;LyF{~1(WFV6kDPrP~M5T?Ebv+e49bUGV%3S*X_K8;r6 z;Mqe2F4H%ZDSizFEH=rcEcU7;RHnLb4kV-oLO@;@K$KGP^@#shl{agyO{kWuQZBEy zl|yqONkFvr#SpvwSoBlPHH=UU9{k>Fy!x;k7I~}{D;-&~W?c;ECy)vObb{>|ep_we z7?1Qil2uVrl=knb+E#On^2s8DKVs6Tn@Rg5o1+6t18CP zY?^7ZMq&wqosm}8{wdP&UCXd_&ywme_9C3aQ)nwE&mznkj@ex zOU8b1KbN{pEE2?0j&pIHh+IAg*oLFJ+KYcMRuH_n zfg{gxL1D|}ZjyJ8&8))*BeZ>&^|o3W(-XvTk+rOnKk=<)X&9(LBE%mywz()cT!jY@ z)PG_K^VIGQ5`3b6I=&g7BM6(Z4f8HpXh|-GQXlZf6$})7q`w<|&BRwwp>*NHhHO4A z9;D?n^MHs%J>v)4AG30v~H}O=QIq`u$YM;zAA$CAd>%%_jxX7u$kh zFNA&o3G;C#+FyF^_-w-ZI73GH<;R#w|F?LU^VW0C3&mves=SFIH^Mu z$s2&%5M3W_ny$iDZKbv0y{yrFESG-;N9M}2PMt}IjPK{^K30A{#O2e_FV>#%%JDay zSxRS@q_JA7gQ{bUMb~p zsTToO=vbsWn<6M=$z3#xLyQSPXV5GfpWfOJw;PL*}~50TEp1twjrwFD!KJx(7nCRwySnV^)PR+w2`p{%)zZ9e$!T&Rn=AdPUVb^ zbG>}$s`-#&FTr@VihlfrKzwKZ;e_vLwl{&Ab<7FhsVp2zL_jO6%GD287BN^Td|)zu zq1}%jFYZS*z6^!Nmw%M3`WNYZEgriQLnE(@F0afskOpyuK@^;4f zZQUvR2j~UC5x+kyGcUWJR+#<3K2{*RnOC9p-C>3m1(ty0@0oqwXa1-600_XW+&Ip8KU|suCEeI`gLuhI zw>(HK+P#yIakl&g$?}{L{6ks3?U0fp0l{xUTjt|-;)BpIp=>c35r|wV&@BPe>Hn&F zJLZCG?z$90-oiHi8dpQnfSwT?BhVCT@p#GCzl~Z<({~6F`C~kB;J7Tk*Cq(q1+&Tf zuHNG6K}as7O0T+UjTjf{y1Ta^vJ?2up& z5pr^3G3kD9%yPNxK<2lMg1gUv6)!LYl<4^Llf@D;(wrec?f2X(PXItBq`==zV|uh( z4iMbEjZ-2JMFu@dVd}`iL zbuV_GR&VhRTlNYI?e?8!QM%VM`VYH#r!w;yskc$3 z+HJyrc&`AKEESj(`fi$@6vO`s`ecB?qAP^Eo8p-k?J0WG9|?)9~e 
zRWc)w``@F$lp~$|Sg>&QGZxxuC_-er2R=VTpJ=!Rw{*s>4e!6uGy}#3&ODv%oNt0~ zQzhq?{p^O$D2U8mv7x@WZ#a_mS%d#OiAWCMUYr-T-F%PGD$dujErk7b-tU}y?gX0# zv(tDIRR>M0Q0pB>lvEqf44<3dUB9jtP^y3aPRy)0^na_7j1zB44PiE}v~41R2h}4G zFG-xRvuT$_K@_U*Z_9uHwj>NsWr>5A&pd#Kt9;ci5=$u14H?q@AGJ9T8JKc6iMsHw zuq#iaw^-KJ!f9g!Xs~m+W#GRfbW>5UrtJI?hDk6XD`>uZtca2M4SqDP zpYX9)UI{jrFBVtUm@)jWq}Ln(3x&nVI1QE3(5q;2Q-lxPw*u+%NRoVSHp#OyCv*QA zx;+VMCI3heet%&oL}E1xSv{VWsamp!!)#h-AXSV;hFMSq)*u(@OX#>y6o13(C;vIQdn9tXHbD050JOZkKt##X zaXC{zx989P_qNpVUODfKe&YX29u)A}E5QjH^TpGZ1br=}=7%hOSVn(z=%}UYnfoM^ znaqqr5w+j=!Ce0(?K5%&zTl>+7~T6pdI3C>o0nFjNu(wn_Jo>Ir>pUwhDd+}R^T*g zf`D(QMGE-e;XF@@>OGNTZ+#FP^DzkJ-sa({9s*3HE=g(4 zT^FQ(Igm1yK9M>1JhET%U{o8x*qpO%*M%wm3v*f+$oTwtp)JNw=zt?dA^}%?CdreU z1p`V56I-=7vA)1!M2J#TOT}N#mO6cxHv}EA1k&mKhGEfcHi7AQkqz;JpR~xyqc`}& z_0v6e`qGEJdBhH5zpKXUK6mh}F)v>yL?5?E?F{LKQQ<+gV@Je5HXC`GthXT}JF1HB*UVmUM_MTRY!AJQk|5(O(>1v=Q$V^@j#LkG)$tpY}% zb7u@EG{ck@F2~7R)0jT{%SeO_=N_o)VFg~3FtrX)IXgCc^vSfMSj5=?LSzZv?X<@x z&|UW@bJg^YU*PqhX||WPMKtu`A-Vy8Ep~v=Si^_T=2->xa2Wg{W+f7vEr(L8XA}2t zvKTzeU;m@3lRyQMP6-lnk26vFk=8pL*&jY{Qn%!8J#>KmPx})17y9H->%1rSE@(-S zJ*5ezQ7lqu;`MgYXu)7;B?d@^jJ0_Seuv-7VV-O$j3G%dE;K@*^cHm1I1ny(i28O9 z4RiPCaj48L^t-`}eSZ?ZFPK-qI~~N7nr`?sX*Ie~XR|(oRb;tPGfdJcJ%2oz@ueib zGxj0{!$8NBnoS6=u(b_=dXxpC1U9#7D&mS?B>L>^T)ZgAKbwyjop1jV9+BL@yc|Nq zbL=$Mfh6lSt}S=s$%;F`APsX7V&9dvnI9RfMj68yKevvYbJu@8H$)(E4P@qR76=Wr zwO!JW1_dclo`&|Vn_7dJb8MITCZa8XO!}A1=tbW0!JtCugI<-R_tRoF&T8(LlR()# z&Ub!B#QVlZt+@)(-`AK*c%Sr4v^QbUUY67*`mU)uogNKIck?O^+viP&SN|vn{@~h8 zhI5WNxB0AWJC;vVA1$2?rv29P58fum^#64OIv#Ng5J*!=YsgTdIWuHfxU%&E## z0yzv0C5ZJ8Z29k@&3(Q>qi)`b4OsKguf`T|S{fm+iQ^DjhXenZ=l{u5kq`Ws`vpOu zjcpG*zpMxgb1p?}y@cLpedPaIamnYQIPZM93NiWd7C4Toq%A4na**0+JVVlF$uSQz zLzzQ#neQMAtj?g^?HMn$BPqC8DJ~$w7)_&ZdJw3{?yvmkR$K}#{oeAB63Pt*|_aXuF3cJ={_avt*!)>h_kJ&L6-ekZJe0X zwe-4li1+}Bn+_rPylsPXH}Fe3s)(QP#O7|Te%GV4h9EG+$1#6>Nb`J{9nsUvey$u` zwMbU$=z&frvo)6)!m8?Ma@>@{5@iBO5~`111r`PpI4yq?1w z)dch4H>h(X+QM*dqYL2m_mk|r&0Zs(N}MtdoA53$w&J~m{oGvtLF_NDK}fTOzDhBs 
zLl=YuVjmH0#{Z>!u6NM|wcnZy?a&1tY4k!LP4sEeQ_AW?AE&HO;$b3SsMisL zm#dk8YsRq@3B><@LKaFk96f4Qg}FNc z)Dv76_9yqgS9|^2N&>-_q=QIMk^UonT(`S0R(<1~iIq*`i=&z#5bcV2@rsJUAFw~b1m%St;V{UW~=s-q)uN|B~u-{qncfGfD&mp$hn(=1;dD?VeBA~5`)E{@=`*UgoaM= z_b*}04`zJ+f2{E*P|l45@)xQ^pQ3R2qWw45B5)nuSHG!(|~JN#Qz80SuU zeD448q;UPzpu2mdC_~fyO5S}~pc18E^i$+CSDIx|w#uLK}%?^?Wcup4T zWS)DdS3Q?@rsM68SLzFKNSA7+ziw^ndV{)yZA<=eTNlq_-@LEtwOc46V@b_wv7W2& z@uG5P*Rq#dB%L!H^J@xC1F-umh{@QUe94K-N=nF*DdRjOfRBO5FGjO>{Ih&cg}SPiQ;Y z6XvW23d`NLzv21cN*S)Ynf!$t%RQ{zS?|H6`WPvW7v5p)vp~n!dz-6Gg<_5SpniTa zcgzAMdpOew73x+?6_!bJcXt3nE9A$kTS^uOdl#COpfJI&MD$DJWa>B%vF&GEdw^WK z5w4%EbUGdKR7i}L|MNQd`&lA)@6n<7IIO_coC5Jt@7r%ANU=8SBXkM*YJ$N1MA9Kw z_=jgjesi6EeekIHkr}3&^e4r%vfQ5YFvhkoB24aRVrmNH$tx%EQqM$P9<@|bVpx#y z5r}!Lu~56gHyfwrX@Spo{pP<@ESDc`<;&t_QM?A%CKEKKi8R9Z+IB$KJd=W@SGOYe z{vQl$?@%$Axg1&$c*>M$gPp+gQjfzpS|v2FVG+>^L4#Q+I)#AXu!O8!w)al6U@WM> z3Y7v4PNF(TbB00b>}x-@T~5zOFaLGQo@)`UXudXszwm_4KKl+WG>4Rwa_g;($ua#| zzi>TD5rYFELEyjMv`6*rU*|E20|9qYn^rxLM2_}d8J1kTEy^jDLO~n?JAGY5gmNsL zz*m)Cw^Q}|62>2vf)`bncml0e+cqJeug^oH8ixbv)Nu;Dv&9PTpAs9b6YW2vg8Ivr z{XS#`T#7;k@7I0|>wC+cUx#K!1&^;PMwoDMi;S z4|}Hu69Rscf2paG7TSEScNB@9&Q}l(4MXWDTE#qY$*cX5ifB@tmJ_m@6SsAG+K7q? 
znyMzsTncM9C$?uW1HMdR5ZmYA!cl<@nI?8PTL5{nzZu;~<4u%Ia3>2jqD>65Nd$tYo`?b33?P=qz(jV?mic5lI`ylEo2E_jj$@>{Y@& zSy3TO(ZJV2yqlp^id3gD#&2)D5);1zUf>3;d_X`MK8AS$55n8<$Yfi`gRoXo+v1PE zL08G!aR213xKLc_EXZg&fweu1;=Jda%qVoRnkU%Q71WsG>lKlXl~$fQaqFC|vt*>@ z&cA_J!|Z1r(+k{-<5QwwXMkW3o@=mvQxIcL9=`_(uKBvO|5F$31 z@>m*4-q(h^)#YA1Z98p7`)&#JH%rUPUi*CIR=1iQ-;0)nIQjJC4OkNL71OF5LF=@l z!V~ON1rSY@prT9ztu?w>9k}=PQAZt;wAQAP{jv2$&5RK!yZLrCaFecw(mfc9U-h^6 zWbwv*bH-A}ah8h(T=uh>HbU*W_;QWXuCyEh-z6gI)=N9JR{u)ces=IIq!#QSr2px1 z&l;X8n~vxhF&Y3n2)q-}ICS(gxQ~kC+i&wo@S#-cq`l+gzp)dEea~%MqTIJNsWJTr zFxJS^4&l9xJ>JBX&gKcYoV6(mpQBE-J?g~p*?$%IA>!1k?t`WTF~OFO?KX%Y+Oj!= zT+OANJoMi;+#PH-`QW9c_hTdoW{i1UY5dI`b7+Bb!@KO(&C6~VCU~698639bLui($ zhkQ>ttM92ACI7iIf)E7)W+$?RP?l;U!xVKVy= zq?S2U!+;lgKXEv-ciS}{7{_P+N#qFxstdX0ZnLh-=FNMRf9}s>@-Gv85n}o|@=srC z>E}Hm6ii{d>~^YcCALWrJHyW-una5AGG&2q_v7j6({}j}L8FdP8T#a`KNT0^r)>zG z;w&MoLsktjY(!I<-(cN8AirJ@l&Wi@8?J|S`v$Pagp)lA@XI$fzZyPYR}smLj~a9& zdgBCq#&1&DF!l647!*BW?pXWM!ke)Q`rKaU%raZTIfWZe6&FqI!@|Ej^%6qTO!t*U!B_*La+g$|ya zE?H&^Ft6Hcf}0pRH1#8o(p~pO8I%~xoHBHEy*R}E<@bEPT7#10r1167n5CUm$0j!( zfNe}L?N}x=;~uUKc7|w=%*PfdgGzWS(yz#G2|f{Cg`ZDuZs=*x-zSn%rft*;E)4?quYe&gBG}zcK4xz$N(TRWQQ8JwCV*|ps7x^#OYR4f+!2;XM zx!#i|4vpLx#m7?2qJrjV&mOn>idTMEy++*FtTo^>m4iI(E^C}dd5*$O0$OML7$osI zf?MwcJ*!b7XM@}=E7-Y#1oQ$3P%sF8ym60YsvIQB%7)j=TBPk5hDR7`bY2_3uJfl7 znE+uec;yPm9#NuvF5(yXq}3x@Rf3O4vaunGDT~CVv9JCE_Ue%ASJsp6n`_UJOale) zju+w;y14EsTWj|f&2-i+%^cpOSXqMqnEwR{vaC?i(I!d#cD1a7oxV~i;;3E#I_m*l zDq%#l5dwzZ(u=Pnf0=$OfesY({A{h;IdD_}3agqt^Ho+NCZ>gVejGS|A+ci08>&Wh zX`LM^k|IfWTkI#O%Sy|35N_zZGcMk|q#bMK&z`MITh{=skkc#=f+ryG=bo3_|`ebZiYTZzr1*LgZR1Zp?1YHW+u51aJ0>H9@O!yoM3-sz4v z{QB7gaFPzWS_HhKsp|>0PE1%gOyWq@ND_`5YF?^n_Q$o9)!a(EtssO?KLrb12DFQe zRG(ysDmN@sBRu5lEjp>*#b#(l(NRn1Rq-2}$nU+0k4()fOAL)uhzUi?U4M{SL<>_+ zNtNZA9e%fZR4k}Fq$Y!NVm!2PZ);w&$Mp2HxJ`{;gDeJ0E~+TYUSu|B+ubt2v0#K0 zaR4Fh4&pu~u0=AAoTGbXUC_uP28+0zObvql1GYJLg?L^O{V-4rKl8x1>HF&QxNZ&-Xl|$YyyDuvGECii&L|p3rVI!YK-;5 
zvswTMMU{$QLJ@9j38#;$*I}UgvXG)vl>DSHoF!{sPbIs>cEcJD*)?T&Fs+o=@@KKa z`0q7%pFH@BY>7~5MV?diwZ3T-L@wL@sQb0ki89c%IQ~miEX^ci z^-ib#A*2euvsHX$AXMTWQ=^xZW|QffrM>owjk~WJ`08;7CItItc=FNZD+g$DUO zvtc~>>u)`03)1zQITsICKEIfK z&_YEIej0tBKC1a{rpzmc`taW)>LS~Bwp*TPpC}Mab6*+6U&h;|4!(UdVgy-gQD@w( zA5{F@G)jZPrDElsg5w{)KA5s!zH!=lv;Thxm&kT4@AAiJi z7ub$OrIR7GD=K?A4|T%j?)dG7R;>`ht4~yT8+Kp~5!Q@Vy^K-J1rk+N<6>k(xvv$j zMf$q*_MrbcX#iu673^@lFMf%bd(fs!IsJF%7BJ!|jM%f2%+rT)5+s&BDsTFcNJy|5 zOG}i0ZYO8$KipARXHO(i9oZI;q@?D4S(cX?3+xLw{ctw&dYE-=P&r|6eE(=`$6n83 zFNxf*jOJ9GEs^@w8c%fj8Qf2;Vj(IOKC|O;S3*YqIEXs&52~~nBfz6v$yjK??NT+$ zp)eATs(Hlb{dtcCrcu`Kyjh0LH}u)kP0uxR0xLESM-b^-Rq`P+e?MiH+q(X(= zWOi^ElV2Xnfk42fUvUPyJrlVkA{+?7Ja^L4O3F2OFIY6j>1`@y*h4$TUEB)uWhn89FP+Gz}Mi3e41uw#pLI|1iX-?;*{BY z%O3-F8r~)D$P}ikPlM{gg-4mXhW+a@M6fNPiv*kC!Af%@NCGXu(wna=E+j>mR9Ht?5;3}ST{Ih6WI`HhK@dAgYh!xELif{%F^ z3=|4ok|iqC8-Pv0LfNvA-~8@~1=>3e|0gN9aJx6|dxw1ok(b|7q|qfB9DP5%gihQ@ z^t@3a@$=+sN5Ee4G)vCv9o|Qj0H##z-rS(C7>=xl|C_>czCRMn?%Yn}X7cx8&ccnS z(bK2usD6&03p6aouHUG$w6;AL}ebhv>A} zpM)xgis#qq%~?czlphj2R!9ehf`~_jBBO=c_Ms&z3g}sqUgBZ@`H-UywJd~oe+Nq6 z!ZIGyFg%Ww$XYJSR?Z3KZ?7PJvXqPfO0 zg&uvWNaxH*B`xV@YfkD}j;wjloUQ%>dT#%!V^k^OChXJVm7|~Ma|isC)>KF+#viqN z-kLlw^hZFqKHsm#vdo)=f+LryG*o86cSD#i%) znOXpx_?s~I#lTfg#C#Xd7m!J{2q;X52ALQD5a2o!>#-Du%(AaQ$@D%~M*GPEOY)ih^CgTr#UL9eoCywNo)^|JungZMB{_YIg^pMofK*(JEO*0%Y#-afmoU zW&fc;a$3V!t6p)4*2LJqSL^%t1CeU@hL-2`Isu-(KoxCz`qFSQ!RZf#^7mJ8Y7y3$ zkM{B;?Kbrp%IKg&APcv9X#%q^&va0H(7^_LoNm{AdmR2mWEfQdtn00*C@}_p6o&+T z5gwZuX+9Bz;0Ku>RY;N&5jgZl7cE3PRVn_6g9bA`W`?N#I@|HKy0hIn6@k+&yRvOp z(Gbli(aQW7Lvmc9vo?!850fd3XrX!}DrO&=#h=yljw2JNg0VjYU~vQC6Oq5vjED6u zR#O{Whrx)y>dBz4(MBK7XYRKA)?8j0P?U}n-#*`X1V~Y!6I3R7L2^S)*VRyGo|@9o zvr3e(37+*JD=j5igamdA#CJJO;+q&&poC$a)OdMFTpqJk95Go5kJD98!sB)@5jk1L zl&_Q-#SK|VXTvdYHSm|E@E$nCycDKhIi-BARZ?vYN8z6=N46_QLNJG7Ji;C#1LXBE z9gFi&eED0#K#S7Xio7q?Gxu24Hbf++Y1zQ{n`w@J`+Y~J_E%uDgHrL>=L1u!4_J$oZAAe$43}y+R@WQ*3G`I{D`2`m`8{nr zsBRXD2W4H6NTB$=TX3Y=79@T|GW&Z}p&F=G8EOsRyE&be94@ndn^$GQbl;cE4)zCx 
zhy;v*LS2iTDqdLW(dsx%Ir}#d3Uh+#NSOLKIa-!$rbL#nOT{bCSYYkXGzpteB6g2O z@+^t~&mB3jL2DfK{?bwMcpbQc2}1N9Bm}+fwl$A{b~TN3ZnzxI+(Oloy2-0Jw2XJ+lnbQ*1Q1KR7Q&V1kEH zwGCsHEl=xIZLI57mjAsmJU=L^w$>o>XQ)#4FbIa> zS*ut|qvgVjY3Bnx*Gxu!-ILyb63-C97!WM!TrtD{^ewZy9}EB(o86 zz$}6}-Z?2z2-{B`;j*b=vk5SiAPQZ#%&@qk1L5_J=<)gpoL`;*M^%)xSq5z`fuqUH zV)jlPUo|3ZFZt-{{aY*^_g^0C(3C)*cmkvG1kf~ftGag)rbJ93eg|N)n3<7dv4r(Y zlld4Km)kZ%s6{VyfN(*`-Q=7i7WpMR-)OUOW{g0wv_m>CaRP;k2JFSE6)H|QwuDFl zLsR{7z~JLdXEKa6U?q&c2u59pP87`l)?@1HMuPs!&+hXqs-s&nq% zDV-SX`-phbIAbHa%cbgc7riCc?Z^DGWP2pbnO<_3&k#Zq6o9LwZB?Q|!qQ?dBB>T4 z7@G#`@VO2fDsi6xns^&_m@utIwo9H`3cT&d_1+GZb7pBGQ{=eewfQ7tWFE4(A@BRa z7I5H`8E>kp&ArPv4%h4-+x&TP-W6Z3hOhI6MAXiC?yS)P*0aHwIZCEX;$=sguj`NzO3;!ONo@wp(E3D3?d>;YMHWKyG2GjMrc~P z8~#xp6y-G-#LU5yr=`}fx<cuY4D1fFP>MSZS$JL2iX^pvjceqh7M)HHjxS zHUTkI6A9FkqSnxbkiyijPG`_(5JSWw+V?Rb2r}K7MpXb%F7Jx~f~|oG zftfgXLN;}JiLHS33jJ6mQp(5uh~DCi5kh)*_r)DkdmkZqj=K_BXLWLA(eENYHJ(7wvbKRvo)zdcOBrGZ_m}D zXBLVs`kMDhFw%T8M^-83f5U3Eyy9@7xmLcNi;R4YEkL_NijqQ1r zACGu~^tFCYge-SUb61=*<~A0sOuyY+pkJ8Z<_8oW?PRENoDTD(tIS!6-r^IOsZG}m zh9MHjj8TGHE#fbXEfqU4lpz$2Ctp0Lh7Q5wR2Nf=atQ`$pMO)$<~_jovGI^^am=PA zB~-SAc8TSa+s)$5zt3}Puo{$I=%2i4g<29B4P4IcQm~*%tT$W%W%Ka78x8~50m}mP zGG|}!8i$k(+*Qtt;Ir)$itkGxyOz3RYm$l!cuTAc(RWP=G}%JP6rZ;Q>lG6P7yxfpvK#1p>SM_U)y4@5GV$S?b~{(>X6x4`1m!tU;GaMD)Z~%U_>zZ zI4z;cYjFW>ixb~buoPbJr88FdbauMTo!VHxyW^r7t2<`?Fw0@ahgfB<@7pG7ma{xy zra-0_S=L3R7p}ab#bb1}&D5ePx1(`Kq1NYkprLUq^ja2-?UrS6n)SuaZ zyOq})H0ah5V-5}}0Rk2X<;y1lSBe-4+hSj2^b!sI z7L9&oii|T^A@DmM)K?xyHvBzQzkD$`foNlOAceoPcQ$qj4)5x+mfcvuQd!j6TIJ4GRx72oYwEoX^3 zy)(EHKaV{3!J&heTpByhzmsbV-h)oV$wHz__713@fzpc-lH*laWUR;Iu!99zC;KlZ z%Eb4Ott;}h)?a^%1`tqE8b(3!&nfHu@F_hgBX58T<3y!) 
zktpPqUhnz`4=d}ji>+ z=r3Z`@u=~wF#m{npgUpEfVD)g6MDOd&_n+Go8rw!UQ_vutLO3-2PtcPZm(LHfmWSh1hAMKZ?8}EkO%p5)Vj8C_ z)=O($mmF2?*kChf);Q&DaT&@aXn2t)9wf>B4+sa9IA5(;igI(sXM6s74?>oyEO>Lo z?tvtghkC*%LucXL&A%(RMg2p7MJAs`Pr^JYt`&~w* zC31HML?R0rzgM|FTi11J7vzK{%$CbrOkX7mJ&-2!Csgu0Di!sa@MTcm4_W9lkXJ>$ zClt^P^D405zz^vwNZ_rTYjSiia?0byif%+8(|!=mb-K8GbCZA@tI&pCKtKtVjS+oq zNAoIpL7c`kZRzpd55jcQw%P+ymy{YV*ThwnA^qtK+m}M;KjRyRzYDkD)Lt#4VG_Pp zYam{yLagMyCTy60nt*i?gGt9r4&bt|@S?6#)NLKpV~tdIv=y z{cYhb!tix40eYtNN%SLOil>Y7i{8dBi9iAmG7)i!@)?T>IjUn0%qL2Ud^p;6?HUD6 z459`t)C%T5`g`&nn;RlX{L}U;L{~RrbL{ zubf&)1RttPYyCvNQ;Nd=ly@f9CH3wUPR@-AU?&$d_le@w!IP8E z3X}p@{m%F-AoDbnrU(foh-#)zkv5@$Q$~(OS>x(Bd%^z}`4q`0NHyLbg>DD}hQLRu z@4~5FMn!8bcJ{cAurcbT0+{a45pZef#6f-#BDv{uYQdxm-MOeHyLres&sr|#7GA~+ z2SPBu>vWep0OfgFq1YqJ-WEd};E8Z?Ow5WgTxXsm z&nf(Z@`VHSmGmV3nddyeRQY9tg9TTaMZ-L4-nOJtY(XSq^70f)RVOFHgWc&*{y6UI z8viOl4Z{#A9?+~)kOxBak(uBD{~1IJAqkdl_#lh!87Y;Ii>M{(PrK|537)bW=i9<<8LCeU61CVK1aAYQ}d4p?}3Fgi$ z(2)LrhjK>!shIIrEhLHqfW$+f#SF#eo@BOK%pl6D)HuWz?ZPo&h}@E57D1{bfoLu) zPY2{XXq_XRAb$$gyyQDIfhy_EydhpFgxRdHAGowmyweyT@+ZD}ua7^E-(m%RM!5y3 zFn^Cafjx|kedyA*4WmnWL0LBkib=!AE4^{fY2L~GYl)AESOQL=*hwNkb zJC;<)S^Y(qREiZK=p>!f`|}Kq*^FfRh=FXLqDT5=SyVw1i?@j-+!<1i^ccY6KXojJ zB8-6g0*ePSUZp1{*V0hC?V*sMkOr%|)4FyOxabA=Ukrs*!xg{3VV0oaAu5Go(!T{k z!^E`xMSRF&8cW{`OLEMoV`-^`E52Mj4uuqj#?QF2?2&FG0gvqD0cokfFU9pX1b<=s zryOhVgGv+av-|-XcgYaPSxLX`UT!47@K+}u7Z7;K!K9>kgHM2xOMtBnmN1fhti}`{ zD&qiM;Rb#AVl7HFGZs5{Y#`0VQ|Rs15;ZJLt?GbXl^zOfRzC_9nGSdHi4GJ7|K@u3 zjgoF4r(Xx!MKfzYZjEl-wlA5~+1*uqE~DI{H`2?1Lf}F8U`4Xy%2(MQ4=i-dAmsp}J^xHlss7@vjQKByNEi)KzD6FmO zZD$N%Jb`v#g^YCz*`k6PYS8+yVGyGTc(WW(fYTly8;JD&A<}TV%40!qxZtIzLY1t zyaRxJv_n;|{*H7fQZ)Vp81mqsePiLGAj(z0|A7C1C}=?>C>PrX;`I;X)4D2n2|5g- zR^@*2cf{FpBW6LW1qMVrQBVe1F{jD~SIDZCp^#RhAyzCqC;=8|p3nu}x4X935ytEV4?F)U=M*0;wVj*Dq*%O^q!A z&Pzfye=s(p{cVkp6MEew5A26$A!1ECIk`%ErKU=grT7r}pJsudFmlt4hTVWTL%P$N zhrs$PI}_u&?c#6b0l+>xc|m{$k$i)U8V9L@4PTqb0L&0<6Mcse6#PNpzI0@b+AsGF 
z6>It&OQpSZ>&X;p^?gxfDoT_ee|k91H}ULUy)ETVnsHWNfy8v+qUPf~0e|6r$o5hy zB0%c|2x$25{5%BHd3nvhMJ`^Xgej%OJdz!Jh`Y_3e&|qPqa!EQnMRbpOaLPI6P1{x z9FoC~W+SujZ-~@ukUQ6HWQUN7(D3(`fY51s^^#-BD-b13g6D6SnW(%HyGXQ`CE81A zbWW{dMRJGDK{8q|Ij>|N2`yfP6mPfaO&`Z{F`sBl78~n3oY*Z(QOKZlU{NL`&y4tl zHB~%AKCW)M`}#?9lY>WtnZ&H7CkXgv(xd<6b%y{;Jx7^}VxTYSf>c>l#Fj(LpOmj3 zBEW|hJ?IV_V5OgS@3=x%p)gGe4t2zD0vMDqDx(?fFWtApGJlAcsPLrB zqo9@ykSej_ScCn)%kl@7$f!)1ff6Qu%q}ZqCsQ1>Wq%sVz|DAfUmlqbTPrz@K9R zXJ}~V?b!W*e*)~RkBI$Rc9g4ajvY5FHY}@V@E<8O@C7mezvBX?DaFclOpmS(RTb`- zYV-kC6gDe(7%OD`Q{dvuo@5QJx|-?GVXR{^Qc1~iw{x@u?s-NY#4x0FQQwbx(dTgb z12aqPSE1DJ0J%maz#W3o;Y&kR0a{%ku+nGXxGK6KuvhT@m0_>WI=PAv1QD>%zq|{1 zp~)uXQ1&*%x?0-br9q1^KobhJ+WHB0woeAW9~0*yoo$6@0q+$1U1bmPf`0FAVg%p{ z{pX$H0$-CmxKnM#W|kAnmrF54!Gcs=)Pa9PSMQ~{W8f+~Vk?3W?p1!2cQ;kVo1ai`wRoFk!wJTjr-j^O!R7nmW1 zc^ni%4XRd%{?kl~k=t9dBk`_9Juue&b6dF|dJDIHo#d2eD1Gd8^~RTIu|{l2(O_!d z11wV^>=0?qoe2IO@~dpng1ijE2&h=9g7q=XL!jY$U{?Y%*^@rwQDQDob)=?bvG=)> z#@wKoXiHb2LSRd(xgiBjwb-u{Zp*6e+9O*AgSy|-W*(;IC8}DpM&}UOfpR(%Bj`8` zMmY4Ui&%nEJcGm-Y#j!7e^)~4Z(LGtJ^Hq8c;qF}(3c4Gpea-Q5anej2kF)3f^AWh z&6N*#jzo1gG|XwJBtpvriWU;+K+i?oAm`PA9``UZEsGrqVkM>PUTqn*a|uT z?+3<<62(bV>X!iV^L9g~QQ>E?OYnWyOO7r#O$>kZ>|MOfSqr1_uV_3Ue4;t*QT8;9IL@LWXHh^V=_CJu{U&&_5k6k9w*Q;%wb z=0WiRAM%pFUl{ltKmTdLgnJWN_So#>?qTi%GaKZW*PwaxNeZ1JRea3RLd0d3b2(t= z=!eBCA^xu66|lv9Y1x48cuNw;gQ}($^*3t`x3(&5Og*=x3WpVDy@JK}>f%c8 z61kY%PeYG%?bRkl?|x0=;G{nvGx*H%P>U>%@}3KC>VM=0#DU+?o?YgOe2t_t(5vr{ zA}JC1aPkMitFnXj7eAd6$7Gf-&ZL2#W}L|N$G7V0ZpF6z2ODAJ-g2nHpOi(21pKav z;H<;=`s0xA<>`SQ@W0EL89Yz;Lpn5I2be!HV;e%_x47yEvFu_A9KV3==2xb;DquZ;~2(GZdWn_Yy!h0B1#9%NhV4%N6&$uJVfU;ALYq7r)YL+h8qbd zFO=5W6Vl^?tbz394ME;p-u%$G3b$FV1CtPFAtmv6o@uf%vDw8}GnVe14y5vOcJsc0Z9ATp`U?w zp!F?^WXbR~Vhqexp_!H;YGl_ujqriaZLixG+a!idD_?s{wD73!$87DlWCd%hzqK0hxwXL!f~ z_L7DCur(}TA0gu#$7x=+zcY%8u;MkbQ$(N@<$2v5*BP2oF4j%(x5>r##_6A*vi~?n zz~+^u@yYBn(Y?Y?E_~7JcAR~ZW(GRITi*$~|37=r!PqkovFOp!K;>#%XEoCdhv zQ(F##1)GDoblE6uxu~;>iMg)BA`z>Vh=I}SeHOJS)w)`Up};n@(emuiC%FP?x%3b` 
zxhO}Kb&+HmRIcuu0o@*rx-AYjt|Esgh=NEOq++BijS-m^JCfLqM9N(ho(jqEz3kM? z&(?=vjiL3ax0Kj3Z9Y-#SfwJp8*!op^4FZBR&4*B$Y+VaX!AiVOIc<$xScK*7Q~7;-YAZ zOE{3TPnRJX|6PV*7&-S?NB#T>Q`RnLp_^3J5II2yHcoSAeVDfp>Es=bZHWTPO*X10 zpyJb|A)2MKO-T8!>B_jEwJj-E;~r3%zuR|VF1Ecw-pPNulw&F^jvx8j3xRJ`J9;ry zr8wVewT$jYc4N`Eg^y{qeVO^;p9yY;GP?n897e(kq5pN7+TF#Nn*e4RGvg!^k3$9i zhb7_R;pa0+;pgfBiszXRN+!?#*+0z=XCp3BC-UpW3`eG8NuAj4D(qQu@tF>eQdFpY z-#dU?GkbY%IS+w3z{vGBZ1&bPV(Iqo>I<SJDJU^Wk_ZB^v+y9!kI%+KK5#bI|&<3!bmsdAw4MX`2PFupB2WGBZlRH zO0Y4p92)JA4vgipyY{nvu2<#dVlZ23pfJy-cjQ&g7XiK5($e>;pxkE2bov0XM4bd~ zN%RJmi0a=gPl+GBwhKw5m%!D(`RU!+q6PK}D$NR`KX6n-f z+35^f@}%XGUxVzHVcB2$qNM+PK}0ip{!wIo&(<1$FxObE*YSkfcoC<4b~&Zf74VFP zSlM23)R!6f+y7BBaRv?b-O#0?lERncY5WL*zj6usP3B5bGu|bo&*ijZx)d#hDm(WenZKNC?w}7>dQ*;z>u6!tBQknC zyqMon_hgbeJbHlIf_fw2u5Di~J^T6r!{Fluoly0i*+26EoU?1Rnw`$d4e3?UrWEm~ zU{HvOu(`=0mOY$Bpg&?>->n<9>I5jGgM_8iw%k5Z$AcBv%}x&IGTP#zB9q=za42Hp z**fiiGXCD@4pErJ)yNf#xf|I<%rg8ak!C4+XL-ic^5$`${+Dz%h4!2tLR#bZ%O|U; zoU4`1oUb@hf3b$IgquYgSq@kQo033dm4ecWJ$ zr?=HNP7VPF51UxzUCA9kGiDtD^HVsyawJ|oBPg{lS=pgK(r89a#qR$Y^%iEt@#ZaQ zY|cMk$(f(n5E>&(?KV5Vwn6442a(1HtAEamAa?j?mU|UX=c@pt(d3Z+!XrEH5QCuA z-IDf3p`U*d`HsIvU6xJuhy?rX-aL1s*{n_{pX#$riLMHf7yGu?V0%0=hIFtrVvOEB zaPWCtCskFBAB#>hzAFsUxUMu@MRj<*X3w#Dwe;9S?RYaOyCuXpr%kbAVpaX4pwCpZ z{EAP=t}1mP0pTF~rbYDcj7};iaflf0r=_JJZdm#bY88PoAjOlc(`C0y+e6o(+=Z-N zKFVl}3*l}GUJwZT!^4^Hba)-@b@8s;!O+Q~m{j1ETD~laLXq-@-VyWVyot56L6$*U z+8eR(gT*;y^xal3dlV{ECygN(@+RKzo1-8g#?+8+8JPWn8%Wwgx1LexJVc;2P*w&z zT6933+6L9B(^GE(#KkMo=OC`Q`h?`<9s41h^~%##^65;XiJGDo)_Myg!Pnivfel!> zbOGk&_ZVSxr)_8GHOub-=j`sY-^1ZJi;ap_W=#J(ope3ql~QSsV<}5cwQ%jcWE}^tFmZAqZX>XAq&yU!Ii^~@}W{XU8|GJ0YP9!gpa`<(P zp00O%+v#=);~~Z>_aye`U%M_%-DM8MdVY{dre0;64`on{Sl(SNF2G&BNcE^x$ zR^C&vNZe_6&6bn&j$~N%QlFkpP_^#Ul|oKb*Gi@31i{|55O~L3F$~0sVrd5OfJQ_u zWRtc1W1O5kG_GQts`Xk2=XFaRWa4!#dS;)l4lI_?5G<+#gC`^dT(5UjGJ)}1!U#sa)h=VZ;!K9xcgH4>mGSh!w8{)O#&_-AIoZhrxIUo|Y{hiA%bk z6nj2-$fzl^g1i!5XxJY=HrDo;D4t#QfpX85w8>Z`HZVRJm|8)vRSxVfRZi@#l@BTw 
zG)T&0z|Ry)9os%yhYp#yWyxg6w~et$@==bX*C9GLy+Fgq8xv^@;BS``_eS^L?_yl` zcR*V@KINbEoybKUkj%}g?HnM0c{6Q5L+PUV4+p?8!N(~z%9f1~7fTm^)PVu<&$^cc zexq?jl2$Te|3}8}IHg#xLAdDedK^$3)alriv^&iBtDPVp1v_5x^KTf_`Qs!c7cR)g zYD`C?4;mHY*A&a;9*XOT{=pInhu*||%84K}50BlG8$e~44%#ZpqAI5bq;d?F*zR^l zPVQwq&=~9@BCsb~8Z@O}|H#6xo&VT3sl%vUkDmCK;c(3&NAC<*7c5H2lqc<6Pw&(415*xdN60FUh;VJL&tTa0BNmFJ29!~#@!`xxX;~F!DeS7YH!&-SC zS%b%6fg1t;)GN~)j>FPb@J8%|t{V>;Oy_F5wC18r`XjwihPDb=;c~L`)V0AD`UaU* zVdMi#^WmSgz`nNE{XYj)#A=B(#S}dh1DG&h7bnFoG}U;Xr}*EHHu^t6f7-We^SU2T zQiWo023Fvs2LFEgCs~a5fy~t*Q=u2sh)=i5lwfj?%Jp9_YY8 zba4c^;d6xm#(At6h~YWiO1{bF5KwBO7D&&p2O)zE&n%&l?s8FYi{l{nED~x1BYIMa z&oC%FwE$3teaq8T&=UyfuY``ENvBO3lv-?=Af=pfk#Q^W5jV*2=$_^}(73U!g?ci` z>comSEq<%wY16!zBS}6@b9!OqXse;q6JvHu-L#D%PItiN)s@P4F$rZ<(8(U{~ zn~)iLJA5e5V5(jc4Ila)^UK&7md^+4t}~sY6Fk4E3Fa8df7p(iNaxjDz9TJfhFVyb zFzVRuk_pz^OdMPTyarwlzh|+*LA7x5_p!AQ(FHyd%p_CZk5OlfnTm*2!1mKMNmbzK z#?sAJ)Pa(nbw!LWcS3!q8U;)N#w)p~c#sVg3C2i+2vqX7{4|)lI2z&v`qK1mpr`2B zgn-tB2hg!ZR`u_G#M;80S?~&yf|FzY?-e^|Ax|*X{S|J=M38NH(uT6AK6ic|x=t%; z>pBvHVYLKxhbSpofL_GqKYYf=C!cJRVIIdEsHk0ftE}ANuTb6YuSjbA>BR~1a?|Gn zQ4xxLC(H4EW5V$f{xKYJqGTK@f-&x7!3ZgORCjnjU5JG&+?u@?j=Z&zq=LQ+B`fED zmA*;6H+_Eki}vw~!*|6K9F5Z;3ZG}{IgCo8!Yzktpkunyk^|QM7BzzauUPzB2X+M) z6E!Q=nVd+oip(*%Hk?^1ZrL%`}~sc!<#{0KIg@xhNG=Oo9rP{OPIbd_URi zjPC?VhTJr?s7dsv9m_l=nETQ|Nbg{F{v>T4gDlOlL1tTy$x=0Pk$me;lB5sE-*Q0w z^UhSMp}03&SAPaeH`LADiuH)`7fck>ZZcv8*w!_c^xq9HT3Xa{#8t^^RBWqMmud_@ zZrWA9SJhNhQ?>_#wq-8RU>W#sr;c;s>vi`fmq$Mdj9WS|BtT+n(oSL%&7*3P1vJkHe=vGB!I2*71PI{TuIXO-*0ewWmmAU%lVqU8LXe`d5fNY+k5tF-km>r=+G}w1^FNX`SdJm^He! z1qlvXX4$w;{pui2M#6!TW$=i-8`Ux^$0xk1HK3QM9kHLRd-B|TO|dGDD=I*ue7EJx zcNjIao_9NEV`%%dK;cr$B&$cWzz<}lsves~M{~!Zm*O-Fui_^cPgD8WxgZ}GbP}wk zEFnCBkq2hW6M+YMv~ZwV&SAj62W}h*Kp8sq=@CNaIYmRm?dioXr%OhV4@%U(%j$f1 zqsX9IX*XH|Nj{y2=b64-!zKg&Itp|Dhf`?i#v)Zmngz%(xIIyp5OD<$Z=W(6wl&!M8UjUGr&GBt1Ox(DqimxKwDD z`*n~}9VANRF1!|^IZ4$EAULo9jm<=d74!n>y;lMn?gcz75*&iGEE3Zqcsj5OT176! 
zC{?X%DY?U{3*J>YD=2~AL-l9O^PpOY(}2`h@Sm9>-Q6;2OG@ZYJ|VemJGvURY-p1> z9l14_OA~?P%}=moZsFKv33lqdSy_-^(2uRwy85%V>IUnrA&ZF$aw7`sJ3Zcq_iX_R z4lx-&uOm+@P8pWU%~G}w^l>?Ai|AG=pWXkd@KxONQTA3_W=-tUTph29%u?4v5=6M{ zD2CJT$jxR$*^CBH@cY+)zGnBJDu=}uCr$HKhdF&3Gda@;N((ORSvp?rj%{6j8!+(6 z?9+{tqjMYzfmQZq*63oWPK7II22Mo7#4h8!l;Oy z{?c`}_*GQ}8^oX`={7&f^R%t-FfXb%FK&1?(qiN@Uz~Q@zN^G2fIy}-3}7J$igntA zQ|hbFzm?*Da6k(~^Ucm4qIt>s* z3eJbViu*3@8NP6C33c~>_8VCgZ@cL%wSxWy_mX71?PIz*HVINqt-_$!Ua$*&7sdx8 z4~mNA)9urDAGSZwwDIehP!{uuZr6Gc62^Img0W`5^O4Ta9Big_C$JiDCn;aAlo^np zvOH-z%|p>niVl77T;jY&y8BTOWCUmZYF%^;nNT}^!OQWX+;RVbL`}}J`*A1>X30fO zvU+7nE|RY4{VMBW-|T#SC$%EK%eZDvppck!3x-4;Z?002p2D4q`oZ=GB%Xj0H3qPI zZ8?(x5D866lGCeG0p0^`*!gN2yq7IGjq@lKYHioZ?(5{SQdGio6nOZ9L- zwEG~g4%kD-!q0K$J|!f;3H#4{zK*NrC9=2?RRR{Av#Wy@RK#~lzfR$# zw3$0vc#h9gg)*ta($8)i?xe;5z85^E($)nx666s8gbnz}Gg0;lw zd8ZQRdv^jPugyQBn`%8_k^>(Mp#HG&vG~^9nM6F?QzBaH4Jl!OW5UfX(w<3;|FoV` z@sW}^H^M+&<=PUNA5$p|4)8vtvvzf~2z7k?wIX0|T6e-ig!0PWrAUU!`VrXbW@-xexx3CJ6yWVUou4OS@_7k0VNgq zg|~x+6X{M~T?@e<6PDM3X`KPZtgX-QO4*M8hDS+amSY70L8aU=#ZF!M>0Lf!5quHC9n)uJ6w+S5&vZ=Yt-{H}+QSw=dx23RNAGx>5XY<9ZL` z*Hb0})BAd@$A&?Lq{fO9RJCvFb#_&g2`DK|te^rhCHi}SC3o}Bb+&IX-xUDfP{|Vw zL*bCnIqUUn@gdW)tkM3BLZz=~3~omZv}jH*jTh6=;i=hF>hU?v6uzn;X}HqaBIZ|B z4K6k;?3fHmc1}`h2)c1tHb^je)-maznQwi9{$}%I6$(IR2)>+MWit=k&s0rP!P*wz^a@8m9G`4@f6#UJ>fVv#bKnRr-sS^y} zF4^R^*{#fbocUM7k_=!7+xp*rRpTlE0>t!|8hx9_X!1euL;Fmu-?WK=_gx65hM_uQ7MY4oXl}&>BCO*VlTchge zN*%<+f3y74NBW6T$>vPJ-GO3%uteobCS=K32+=vJQK)HG{pMihPUAmKI_C@Vbg*l6yuor_pYs*oNPfZ0NaS>Cn?3-3wu$U8b2@);*TfIU=Xc*u z&_*za{Z&HTF(EvVo+X;|SA0^wJ;Chnt@$)mqqrCs}^^tI6njkK4XX4VJant9CZ*HTKz@ti-vP#G$X{%cDJ@SE4TNkqNvT%D1 z%-7}wlW6xvbhj2AyyIZVEVxni2LUxkaW@NjyQL!-M+gbmpm62(|Fr-<<3OY@Gm)w} zlO?C2DPYZ&mi1IL*7#`5k$%&q7W5bNP>vOpA&pms=at9p_6P@jF6KPF5{|jek*9$H zCIFUTL4}{&=+nf-(sd;HT3-zBXaNR&z-W_8hY&VKdb0N4AX1M@V1H~5NP74yT0fM) zv68qP)i5x4wY;m_ZvVTmiRA?l0}8V95|sXN;*=R<1naM_rOa#nGA&ws(`E`Me)(No zn(F>C;p^Z7eYS>YSQJv%uOvW_mMZ=t;F*5EPvzSzGcNXl*zcJSL-kj~@BYvOuN5se 
zBt~)5gB3y%L`Tu}PC`efvWaj8%GWdiPP=?u!FM*G!{vX0E-A%|8B4Rr&t~EGn?~2> zDfvs4BTVc}wv}oalydVwEKyLaGJ!-b%2T}PZv%15k4mfgWETu5pDd0BhM{h3-W-l2&-|Uex^OM|9<64o<`M*s7 zAHHuNPeS(|#uy|~;~q}pfDID<+AP0*A3QW)WD+d4r4TI^4*lo4smFgh7=BV5w>R!f z;;YB*S~yL`e6e64)GKN33_MO%1)O~Jq&E91ap{9d5yu3Oa@&}p=ZX+xxVxPC-u(VV zT_wn7wS5gqJv+hi1?UAq4O+TC`8wv;ZI;4)4>T^;!vnS#zGIa4(d}aWU+dz+0%hB$GkvhLxa9w#y=zi*!!M5-vumeK zGiNsvjNb6lFq8neq{R7xo$h1Jdz-L-6ujWCXp*H9j<4-V1sEoqxOh|nO$6dn>QDr$ z!a>M%jM!{>!*4``-@-S6$?+8Q)NXU=hV}=d&u{L;gmU*T#7xEuu^cOaRe(Nhe#pH05FyM6gX;ehew3bBj7rSfmmgH@m z$Zs8q82GP;93V=G&N^%SMjkty>AZnxcov>|Z9*nWVyzZ>&hoJz5>+W5wBSfw>T7+3 zUct%a@MH6Rbe{KpY`~&bX|vtCjki^?jji}s>vg|pvweHrs$WJ+GXV(DjtfD2o4v9g z90P!m{xDa1UII~KOHbhBGa60$ZDj{$NR9cR%YO^uA2!24$nyH4LkdPg!0q=xUtagp z2|rYQEQn9WMvK}u+W>B63)$+pPPY~G{+;OkPQsKswpI*G#&@mN1j*#TIW41|230~m zejHPu4YO6gF-uZrAIB_CbJ;$N(gDY1@>`~ZpDWQEOjsx*kN`F9LRljq32*$E8o#5@gdo_?inHeHDswxJ~kKwm)V`CG^6WSa*;3y_8~ z`jFqf>KM=R?o?3w3zz(6U7>6@-WT;23Wz@!Xiu zZsdsFq=BuIKfn($x=pmW!Z52mN~=>y2S&H-{!OK+)gmh>qbTcB=w|Rsuj^d#KmvE| zJ-!9hOvjXtaD%q8DFU~5ez#^#ZwK42b(F?N?bRMPN8f^u;o)~9@#F6wE}V<2In7_O z$bVCHkEX_u39QdKP1;&nKW{h^txK70?Qy9>${)lh3R;7vcKESB_$)8}MVJpMPEfC` zeCpzzS^IKB?h$-&&#~@` z!#P7qAoqkp|7s_l-K5{r2~btGo4qmg`fi_`64s*;(yHFlZd3ePf?8J8w2XYKMXcwk2s}cL_FB%!}RD7Tiu`# z=REpb3H{p7ql%3?#Non+%-MgxmwxDhzhu{t@<&780>m%A?nuI3x=h2Nu6m=n$Gt{L zLdHVn+B@`b;IVQBU2G`|B{Vz2^GQFo-q5pg`3^Ry#F2K|iEm+1P8fKy5 zU2I_ZAUXG~Gg0}3Uwr@Tv?QJsi^SFUItS#6UfLII>I9^0@Q(E4F;~HHi8_~QN$3Do z@3;J|YX0JxECpXsJl-g*g7eMIDpvJWNk)ox=8(^o=1XrB)qg8Ks@fU_`CqlO;&s+u zdj22;eYzZw{+!cFXqfo@lZuEoB)EG-&NP@ZyrARZ=jR$4ad}AO#B+knh53^=DabYr zOudE`#4$}aEHR=l%=^KdKHo9C_SzIAyFde4k;wg{OV}CSpkl0a%;WXlOLZiu^C>TQ zhb4-f>#6OUrD-L7+J>rCVBcw6jE-A&4pe{pN4s)}qp~s^(P1s_g+&6x3gXu8=weYM z12329{9O_}2|eT9lTH)*g^bsjIGP2*U)=pXdoCB=wntcb5EDm`2Nw<#^OrQGp~<03 zcuZ6uE{0OeeIXNwNL)qYPk>tLLlPYp&39h37J({q<-~ckLkBn2`}aE6(fKl!%P9DQ z9^FqDW3rW&Nk63yg0Unn(=6)e*{px&1N(vw3dmbLDD7g&MdnWizsTooPrPoeo0~a> z(B5TR3Q*6g$e*Ge!?2OSj=09e+j6Y`ny7>Y1}T<$JpW@8dR9!bqMLbG94FdYjcYB_ 
zEH`WQ>UpzFzo(Mnm_2q+#V6H>2aI_-q?1=LCC7X`U#ztU)g$Jq#i=XjhtkhjG-Zm* zZ6eib6aSB339h{_w2ILWyV)O@ZjP$o7CXJ12lC>CwIYZFwIW`LENN_RE`|pFG9jbi za(2c2C@PWTy%ZejRys}+vLWI?;E+bt*A}2bKIXS&&A4K*;#M5pVYHm1m3_u=x1Ond zRd#XU6u?!^Rq7`?fOIvJw}=#Hu1$o;qE}4Vaa*WW?U20+b@$vQ?GZ7B4669qV|0Y* zI_4!LAtKwi%@jXes0^Z;j3a9w0SLIo`_wkm}2jS#wfVRlZfsu5qJm zy~r({paY9$Y$I(5z=~{*LE(O<^FcJ04oF^w4IiQ zlpgk@&(I$wDGYOl;+yLLsrdsU;aPVF62~z;<*K%&^26Q{@r_sk0sKu|3lZH_283s( z-?!m$2}fuW4syh@TxWF9l+Gh3=fU!zs%W0i65Yqfk_x;5z|~6|T_+qih|Y?m6r2`Wbq+G96?%WVIQ5t45QQ~>rj`r;qQ zCFulOuZ+{74Ct|${8Qm+d}^BN?-ztH%Z~`QPwL^odf#}`@H-yz5?Kr}x4sS;hQAip zQE9q0XTkA8L6!-`o=5;`p{#YgBcxI+U{6YwLl!<)@G>QBi0+MHH8Ipv#1EmkJe;k~YR=ajfYE{Mi{2*>e~ z3!m6~Klx4SUhp-Yz96p|=7xg6;)b@5GRuiIBSmTAR*!4-`4v2}g;-X9E#B}2jaw9n zZJ#)(QE*`*N{HUMie%7lFE)JpF8XNtiiFpimV{@of2A#bK8QIZ<}Tl^r_z{=1ygZR z($dS*D)gUaxRHP==_D0pInLxmXZ~la0%;I&^OuWW{Ah$UyLFXZ>dZ}*GfhU5@3`5t z>8@o4S-(H-Ys`IR`)%A@plEWzYBQ5-afnjDW9NEu8{slNOy$BpCAiF_ zY$i>Xe8IYz$$EfG8dZ_qC@*3bjzoHH@t#P)+n*zrm`B7(h|r7yxH6q@BV%$eY%wFM zd{o%1q-RoZx3DL7Kq;$CMkVc2J)%oh=tk=E)4g|B&Dbm>107}BCJa~XaAqj{U{YFB zUA=Ueu3V=4eyCZrbh%+8tKfcBa8=;u^){#2u7MZ#B)dw1_I4TdME@G^n7YhP6I$fo zJeH_X%?|H&H|6cJHF{R(#{ykVDc!#`U}$N%^#Ji^Su)#w(EiUcEuw>D{D#;St1kBz z0j%25wm3P%v-eSS=h~>fHPsngU5PSYiRR~@HiopcH@Y;!{xbX9ox2NoZ$rljPq7(y zy*$D_Q2Ks2c#;zp526-Bbg^QlKoke4A-|y1!!ROFJa2QU z=jFkN6lgpIFNv^+v(%v6Sb+}Bk%j>rPXZy!`O%ApmqU$+e;X)s5`tK%)|)UHuV|F9 zglS?mtd9QK-X^>P+C7mPew}G;D(uMLA;;SbYt}!XV5PaZA|iy&Lw{}J zJDm64J*Fq5tg$fK?-Ad!2nU@EpdxTzx;^CbB!+KRzc1ojHq?}jYzgpu_Bs)EB7ZNi zl7Hg!9$;KS!9=vYqUZpH!jSf0zB?5(T)Ik{5C;@Oh^9Nq6vL|ZCQ;_vF(_?wNa z{Q(7L3TAQ_)1)3Lxh{-Mv8m)i+f>sKr&`%O#1PB2D`BlmGX1P39YZIh7#@#9sxh|` z0jZ5wY~;>C{xd*E@cKyONY^-#rDZO9&rN_Yw!M7T?dwbwkVvjj$|g>-$^LLGYk;it zj8@(};#~IYH^*?dM3V21__t#3veZ7}CZs~Foqy`Cedc8bHW#R63j~i%$24&yyE2(o zch((wkoqO<1b4gX@-7vQ{VQFN-~8K*i*qzi^tTQ2Uq%%Ong7&)M3&A%B6M(sCak}H zNJG{n2h+I{nPkm)d~q`Ef6F35HEXf1PcBeiZTsT{_9NTC`ZeoNbwMqxwl@u zSMPlH*DAW2?%r!{nQP1;UrQblVL8HXBHZkI1m-Z(iuL2tPw~m 
z6d@$6ocL=eZbz3$X&EipGLbr(jCktW;xVx}pYT}hFOYCoRo!*TQEVA(P#uNv80O?z zK+|h^StGviBYZ_rmen^v?R5#%VezoRlRM?aWh7uj4D$^Fx!%n#_{Deb{FQ52K*L(x z5CJhUSIV7{Kot>a_Z{}dDx}rt88SknayC-nLlgGTrzYz|a=IBr8UffW!0GP1yu|wj z=V7})DQu(DQn^3!nW`Jjwz(Ie5$9e)uk$x4#qxm5Iceucxps@J68TZ^MdmzQ?KRD| zP)D-)A*?v7I9f_{9X0iEh9Z~ejxtlGy}P9SbmZr~ldAr7F4L{PHjR45(N>D5tn6Uk zu7L9EVzo!RIuBuG*He+w6c-2%+0cenOM{6sytN=&#+a8Es)U>ssKNS0CZ7h=vRn{M zo*6lqhgLcyIYWCR%rl*=rT(!|o=v7ocjs*&E_4bk9N2e&jw(che*m*HC9Bv#+IZ^7zzT(iWG7)ZfFS-$@_GQ9mO-Z^rrE2E}Ah zefgi09rO|hT8o~{2}>)ej-;=w#*<@D@yijiVK0?npc*Rx;By7YGK)DtrwQ*lXz$3lp zVkh7^KhuG&H(sjX|-a3|?haQFTXg zN0B>=t%pbCHFcxYsN0RBMq&kuf8%eI!M~v|#NVKSParQSvq@73D(|fV2jfg!WY6;c zSbp`0Cs+Cw7D3*t@I)!txCS?QX%D%SW`0HflW)MlKvA}|%;Zjc{9oqq|5>8{_W;8W z0FlYq-=4{J%aO@^gSjhh$w(S%=%1zQ!<7I=*6ZbqGwlIMSkwOXZiO|2N;$Lu9%*DM z%}nGX`hSY;|Gnt{Wv~c`$q{6Oqk0*Hrvm9#aS~e){>yCspSfkg1#`(_0$9PsJ?rFv z+zfjPx+;gCCCC8xtBT)Sx{~ILgl=ngn7V=$cXh1b| zrhsyB0mUGp9LYZpHw+&TlRoCiL&z1g0(E+M!W$ao`u#wpalbQsdx+iX+$PShtTT_QhW6Y|tIK;9?=TvafUh@r^OEs=vuT{@Bxa}82SLOHQJP8)ovktfN zg(;U#snwkogI>2p5)mJ&c8C!Bs$BZC&0|gH-?PsSLSO%pvE%jCmz0VH?}Nt$DueS1 zp=P^#QGodMFlMUntP&n#N;jjHAf5oW=K4yVm7PZ#H)D2_VC8#`k%~Lx;@Tr|^Ckz1 zMY-2N8xN)g<(u1_u7g_J7QtTJHEOLo7I8Wl7$m>K(zyp-V<23Xb6Ax);cb{Dv3K(< z!n;lLH@w$J(c^eZMV~D{tya%wqc&X>&p)^*tfmpjRPIy!(7~6ThY4mNxTz+F(3*%=|z=RT9IAuHZX_){_1yYt;!g+aywqK$*ndkrIF86bM-Uf(nxygEKf7%k4;} zUXwACO913pjN*ULq_i5x2_s-1t#LgIJ55vnBgN+?Jj1zp)C$Vfoq3uMf;fukNcsnA zfQ$`LmQ`A&-bin$_YFtBFRRr+>v}+DfJhp@SIpxT7ItpXeluJ1SV5~@PeXX}MO|>M zaW%c-+#lU-n}r^!q9HYwwcgc@tNpqEe3i^34uUSc3zm#-$M>Lcx$pfCRMqoKh!RdX z`Aq-&+3;rN8E`FsFA4K^c%0*(%KU0|-aiG>S<~vzVXnl9>dC$A@RtGip&Stf?SI|~ z8F9E4Mh}cxNl+9M?GsaCd!*J1ti+-^w9<2{&x@p47J&L4nr=S#N$PD@m^u24zx0eQ z-}bS?&erkCppcV)TS!Z@*oj}swrEEg^ha*GLxVV>(;IBhl}# z;Io_F*knp{5&ve9WYyvevC`QO=KMUD!U{R7$=`#6aB#|gJ~;Cr__RmBEb16XP(PgS z1gNOQTV2)oOG^{x85E7gvB&MmY7XUUIofRFzY`-6FudL0>$qZ>6BnXP9#CsfOyF}Z zzTRpPcd%Uy$d-uREBki``GcCnw7hw#1(t1_1zrC5S@wJD_5JP;yj$cERCj_{h5Ncg 
zc98)=7lpJRFkcHbjNEV%zRnV?9!nW}E#m*V=d_kj_JL-<@e(y06_-uiZY8G{b`GbFZ9ge%n+^Oa( z+!uHOwk%dd|7L|&b|h8NBd=@nSeTPWNxU7UlQ`zwEGfa9cQ8}UWX}F)LXa-r!(#f= z`JJA1Db;|wdnKcuP!S}0CP^rj4b3R-x;cFX3fy3lXay_sevuM2T>Qh-7E*&Z&+(Ew69)f|wA5=v4UEwR&X4_Kqu zq5Nlku3^~s)SXsmZ<(_7wqw1ncPyT+Yp4RgX9|`H|TpnKcV<&r4 zW_@Q2@NX<-mCwAU_*#P(F@_s#Z-azQv|#TM-rY}hXf7*DyjWuorOMR>7d2NIT2~jx zQ{@T@9mH4}kGW1(Q@i_K_RbqW5ZTN3+@}#8g(Y%+k;JMbLw`2kaC6y%gg7UbQA8$g}o6H2&SjCALaqNjlsN{Q%6N>mBjSIYF&yI<&7A zC|@jLbDEGKJ8NR8Aiz$;`xZWrcoIJ@eOVS7R__ba?V-ZTUIHXm5D1Cf#U@6lICaa{ z45o$;V~uyjv|`Akggk{v3cP~8-0o06%Y;ep`(8aw^)YC)PK7pHLBve zs(+d5YE+Ew3Ipaaq1GCX(o8IT@_2>pDFG1zz==o|LGbA=PYhf{X!N;PK^u>CrZ{g2 zPo6`g;GOB`s-CPVttHet+l{@1oNB|sNYV1vOt>4Jc-VG~r!SVN?@BjTu+h-42dLYy z0uP{2^*Jl{r7KL5A0glwDCnE-N-z{j`HF(R?I-BmoOi5=LjC z0w8X^_I>OJkv2E_(Tq3J+ULZ+#|;b@f5{Gl@UOF9yLBAea|54+2ciT-ph=5`N2EAL zvl01^@_TNPSRJXFnaLRKk+kjxBA4O8Ink#(NqX?a8e1sDqC?X4@~EF}DxjJ5s@kl# zfmxxo5=!U)RUiNT74L)OxM@{&rLFU7jA^2;c~jj&*xba-3YxJKgJoLvn|LltT1qPA zSjt!^o6n02Zu82GCxwAz7G&Nq6V3J5*4tub^3lMc86_hl4G3coj`~|qNhBcd$dc?Z z!R`>|#qb>{R^K?#eo=-=p4|otrf&d)5SfsKL0jtbR)#Uo=9&@Gr^wn5yK`X&xDiGU z5?>=N*>?*+%qg4}WJ=!ONWlaGwx7zY@>Coc^y%FtK`i=HD=#lazTPXCr0@`BZSBuP zT*%RlUY}WMi4#IM3zLf07UuKUTXXU$Et01nOHT+- zRxiA4$#`BFLFO9BpOF|a44d`Zp|$tud!FoLN)kD0WLGCIkK2OQn+a3UmY=%ZHbu|a zckiY#HM065J_7-;w*E0k5({4Rf3qw$1YWe~ff+C_pyq=$Q4iDcM&Cax=fJ4P4HYFI1vT=$ z<+n|(LiIBt^Xe>aIYcxQ{p6nuYBEr1s+s~mcY)BPpZ81RT1$1NB8pwh9(;cx;*b5iS>6omI_}j@>ZwvZl^0+Zk<%%^F?WjPNOSEJ;S*%$U+PcT zgJPD4VnPRW?AeW&I#m_2*~iYVo_5R9;&<|&DUSb>#Js`)utxqsO7U?{h+#hJ05k7< zRnZUXEqxC&g&eH!04pH;({y+dFWo-}m3}ioCxR!K&x5Ejyg6B*k8U~%(%JX=#PlzS z^ZLdfCwLN6L$7A`-56<2c?wgZ2S#}{A4;~$a}~Zix;80%{Hm}$bRN6i=cO1fYQZ6& zwn8Q8HhyIEjtUZXLMV+kxy=MAoajOg3b@D!?+++#gJbwWv{q*4X||shZ?=Dy4t;av zE?he_=WrXJG!wo!6)3>~YxS)kpR&L#As10Z(0WVoAZ^0or64SzGetSqa()*UUa)1@ zQ*1azq6!*ASrIRJUXUIqzdutmj1I^ye|4g;D>8n8QJw~?FTGqZ|F?A<)~a`Yc4|0{ zKjmVUU0X4nWVfc6INFWA%_U`h6B`czWKWjswafgX6dbfx=o0{0sAdF-Z7+G#(@$Be zic!yMThZoNMmR9q8hSUPPF7e?A3{jvK$4*_3W$hUjbX_3uR}VfyJwqExwQ5)TBk&J 
zh*hpNeWNjGM$+wmQ)Yjb>oMiTY|MF8m;?H9_H zhxGVXw*#N&_9!#?p(q>E$rlzfrd%P2qBLQb?OK~n->3*)h6H;;3{GreVRLrL!o~-t zF~-vphnp%U6Z$ppX2?%8F4nCn11sGwbBJeWljuyfW{HIA8}ZJi`dr0EeLmNPvMKVa z5Fz>FIz1o@M9u7^l%o{Hqsoj`B%Lz`VL+CqeIt^(ErGe1cki#?iEUrAegoqj;5~%q zR9F1vZt%)0T;;m%)q5(BF_+q0Z&K&m+^9q68y9O&BFL?PY86`K+3rJrU@R9ZRbbQ` zUQ2Q^@d^dr{AW5DgJ;;Y2NH_)9J2bgH!-E%G&R`*KvaHm4+FdK5l z;1@ut6f4KyHsn@&5ZkCOQ6WY=BJ37kjxoKYSZT@~h-v{3L%?7DB*nB%PbVXcs=)lQvd(LmiH>>a+)<|aX;NTHLuZ@3QO~$oiP7CKf?s+P z+nR3b7$hyIASpy|D@4)0d&dHTfHV{l(n+T0E%0AWj?I8nwj5?D0gS{8?N;T=xIOayU$_-5!Nh~1p2k7vRtiUpoHgWm6bH_#bspWo8q5`L8!<%f0er1KQ9smRjTtNH6k@Kj%|?<$iV#6)Sq zX##if7%WD-eG~arK?yS&D8T@Y+zOF2iC(K%`{9kVdQt7_pbNyE-1ghYQ4esH*Z7gh zvzTtFuKH%eM#E=_$IBA37LY(cUjlRgz;KpxgD&Uvtza^XvF1N=Djp5gE777&{42NIJkzT|5cb|fbAXf zpd{;64iCp+^Hv@o4vYc|;F}UFOB8S(pxW-rN(P0*mhbUEAwYKOev8V^@U!uF_Oko& zy9uiIi|VCF|CAo%UqRU~8xBUg2_Udpn+{O#=x-Km+#a1QpHa!i!$VhoAhL11;kcsg z;eKHC9z?WDY5!NLrhQUcDxM!q5&sL%pbK03fNMY(4S$mVQAeA#O-Ri2(6Q-{B%&qK zBS>`Ex(@I(o2M()%Z|1kXj$Yj4ryZ=Ma>%LaDvC!27f9m`H@>8SMF?7i(fEa&7RyC zTwl3qXBEl@tPL`OGlV?8J6{Tv_z_Z@$&SynCHkIUu#O#4BQ1WQSY#3Zw?s!tA=`j>w`s|3&+-Q&bN!IQRs7P=gS!49KArYCi% zt6xRLGLh0*G>#orlI=(pX@e|yuj^So?J@4QQ~<-zb>NP4K{RYAD=^LBa&&9wy!$&P!1G^ zv2%@WfDq2PA%%{V{a`XEgc^g3hv{tl*C;3OgvXwuJ`ZrCqh5Y zRTB~{u_#vP`yZ65w5(qT4Sa3YHT#&)n%|>XP1f61w)iA+8p%-PL4}Y7Io;TMM=mqh zd;OFgJ7YAPFMW%ZH&!KJQ7Js4E>E$^y*_%%u}56`H~>%6}fZE6q}aKTi>g^IfRfNtWC zIWNa^-gu~&MMs5>I4szh2dtKLKna&?s`JCD45){7{k7Y6K3JDRey_5I+pXQEU*^{B z&Fs}}7L|Qp;pFR#&5a}8lu*p-)N5LJ^;7Ky{;EK^X!OlF8RFV9rXz>vekJUo> zPn{idG?Hp5T%L(Xqybj0AS^I)=i03y2{D3mB;2ca^!dGw#se+jX0!+?pOXa_K<09T zf+8;0Xb9;#APH6<71H|^)V?SM8C{UVHF{%&g2M@XcAPk)CO3M*ChBTwvBb#HU+s7* zl^e`2mZub`vDp)SgE>KHjp2u4!=f9I&$e6H*#T^LsXw1cu6!Mb!^Gz(!PGadpFIwn zY|PyPrLzhd7s-7kQ(i_|z$cGx=Kps^j(6}`dRWOSfKnT0AS4Bh?}b%PXGCw}MmK%t zMp;?+TLpPao*_WH73HL0K>CsMWNE#;+V+-FND6mV1aF+4%Z(*Uha&nOOWo`PJr9h> z^x=OBt)uqMo+3~-^B`Nj`O();XAnEmFx&?63={B9N);C4i`Ew(*+jMv5H0-ca~SA8 z31D+`S7~Bxyrdvu03t;R=}7t!Na=hx|FG+x3QZ!mbr$k)!JDqqku12^@L7AUw}cHW 
z83$p~1?9h*1|%!4J&DWHck?Vm-8hCs{rr3!Ds%GM${N0(lF=C$xni;ywS`N&4{jqi z!ZDweO}X1aoV~wT(q6on23NFSF8<0RTO;iHaVYrINaO@ceXC>aCO}{^AG0c!4+-Fn z9Z}TObYRdmIYBF zXMuEDu(3J$%;7RaDzn?|*mw`&FOz*8R0>E4d13HCq5E(@5v%3L%itk$xa?9-&UjN1 ztCD6dzQ1_OlADE!CNdKv!fi+T_H>eB^*$Z4y40Tpe}SScr_QQ@*e*r=U0t+4r=g4X z+(W`&t}p9--?fnElWOhaW&67jI-gSPTZt~41CMu{okhKF45|e?41z2n#i5++W0kVG zy^bO^aW3JPL%a9I-w%TZ6I%G)ckWWrGQ?Dtjg$gAmqzN?Fx+H>9}$k)x7|6e zyfZFx&v}3FR*3)R$o*+(Ajhb?W0ge?P)-g;GU9p8AGGvG>A^0r?fKH_wLp8V{Lj6j zZju|?2$V1wL&Pql5+9F%?shQqc%e4RVaIl7;HxIPlovF$iXB?~E2w_zfn(S2=FG;3 zLIkMVr6X19jq@$1%Kb0_IsdJNYmS~4jSWfHyD{$Ru{+(-JR{c%78VZ9csyP_a(qWY zP>r_9F6q3awl?+w6pCrualF7;_2Uc%=}uU+$tbu)MnD`+@OdDL_mYoCV>FqO{g%jE zHLj0DPOf7gi@eTyi8l9l$%roq`U?}nd4{oCAHfdIN&w$;F8;dWQ)`|@GohPFr_r}v zULCTxY8MD|Cn?t3`aN(vL(qUBgI@>cS4BA{uPNNXc!p(W$eGkZrTUeHnNG2n3Fxx@ z5%2|;8>;XwaNkoS67`aPe10;^8&dbjz%w(gF&Bl{ACRL}M{V_(3hRbf_XpU~ zSJ?bP9uAjhi~&=v1nRy0dKY! zaCpBr+vhOAfAHA$rz*k)JtAYJQ`PMMIjHUXJZjKE^Q2w)1*HB7if?r$ef5X7ie)B5 zA|&Lu1_B;O=HlaXz+X5c2)L%HJ9DWg_s6!k5II%VhQw=(-+q#)@T>)w&=MNM0)a9u zx04d7BhM&%=n^@>ActWA<)|s|=q2Y(%SHMy0_L1vi&(R1e^6iX?si`^Fw59U^YT1w zgBFF?7LQ`X72D$b^cJbV>ao=$8S{19Dam%VZ)4g%IjHB@Kn_tSgVfsye*Egi{0S>NQHZD_fXBe{&+~c;NT&O80nJbooNUJMd z?h*`LntS;c^7>~z>iJi8Spv#QoA~2!lk{(nt})>D)59zOMa&QN19$Mrp!Ol%Dd(^7 zC<0CJ5F49U+AD%Bem0WB&5(S5>lh`#S40^7E2^5CnEAv)3@9SfphCWb2Jc7tq6s z1hHd>TsBX)7;>&(;27x^SkVFv9(K*fuaaI!5DP0RikM664LbXIFL83wpp-Pufp6i$ z-zae%yi9=3p~#CmT@~C9NzF+K(@hBmD%kbJ)3SAiAYGjjVW*ppb@l!+$B{ zVht0hD3usAd6h7O!A8(WVzudh6noc88l=3Dxx@8urtu-9UQHM0QyQc@si4>okiM2W zEL=r`q`7wIIAvl1sB7Zm#R928KcO!(T{`9he@R-!T@5%so>)d>WOI|xR5BuvkZ7#$y!v}}LlYws zpFc_|Wou0BfAUo?I>bEMS8-Cj6QmfLP-61nj(T;X0gpeuvm+T2Jt^BC%4-xU9t0c=^07 zIUdlIhym%g!b1LfAJ`(DFu-x*&ZNo>7sY+f*4G{3ymdt`8O=sJa~BXBmdZv$;g@3> zism?g>apE^Nt=|;3BS_NlOIfA$B!62lTV5V$r5Kgl=sIVjM#!ee-vH8FGAidQqels zL(#~1V!r)SouCCCH9(0!eG1o~BSwhpDa`xHVGSN3+w|^U)MqqrI9r2U)FfL#=B52I z4HDET7k57R9Whp@4FX^FczGt8WALG8=89h}_&Msdx)+Q=P5lT^g1tNHumSf!G3Bdx 
z52yn^6w|Kp+wCo*QD3jqH}hg%FHJ9UmUo~C)Lg(-&QHh%8CLxBw3v=?_nAmXs{z?| zDjGBLvAO0%w?|q+;Rt;MLlxaw-a$ZqQ)ob*d}_YF_@l5rHnI8Td>TDCfQ$u|Jewa} z!@Hky$ikjD}GMswun>)yq z=@r3Ai3=nL%5{5wGX907y3q?{q%t5Z^4<|6Hyi&QD-Tmb2%gEAFNj(Tj>`Q07gl zjY{r5lAKZyN=VA2fSx8fEJ((aE6B5B`i)|q-2uCCwoukTLl!>U>ToPUR78_h<8 z$jgU@04MKdU@O~6QRj+j;Bw530vrEV7mCvma*&r_w7mkl0702a(~ z=#uEQ=hHoo=$_-$Np!v)T&HES#UPK zIp~S39^DiDw(gh4g@QezQ{H?dJF;#Lk%s6G;(n$c(kg<$J;2U^31%CF^mvwF5Ek+>Mp-27wa{Fi|Z8X(>dw$rxMzV89+N=D6Rx-79)uo~lD zuz~0Z`5*q{f1$$tEAXdD6>s`8J$kMAbDjdpPro(j266y5HA{hCEF@O~BrPAuvcL_YeD`~H6$`eOWUmoyM$aWL3lvOGA4L0=RS^&kDq ze?P2gFPJ0o*I#{ZQ0Z3gT}6}+0l&wZ7d#K8b#7g=b2hHwFeF5o(|F zv^D-EdEg=MB|4g?C0I4wssEh?`EP}wNEP+JBs0TcAQB{O+Sh;%#lGQ6;`Ap)$=A#Q zOJmG=j3YZN%$@c;R9egp}b07^QGPjkeD$1N`d;`wTn0+GW05$-7;YRXTD<(3>$*~G;9#xc?v`KCeYyCR~ZOl1;G{Vi$oS`uv$ap^iNR# z9}eRGe^}uGNITl^jMAz4FXRAHJT4U?iN5>|4ck z?agkUZWrfkyc~=;%!UX^6NZ7u3mg&%dU+pI= zbNer_&UHgxfhiDrPD8;tscu$`iu*;&RYu(h`2J3PERlkKD^-)PvbquYWIiS3JLSm3 zK4_FV$Xn~!bVtaF`_?%%0({}87@epVbvX_kU3&C7b=#DQSIZt~!%p%&(&!O9ijec> z-=k3HFN~M{EK3Eimdc*kM>qHgqZfD{YUBPHA$cLEYI6+E@-RcJ>hN?QGO>s%d4oZb z%n#9(V3#6J-mPDwHzMvEwDRan9zbq4nFjd9QqcrPk+O!^RqLzRk|WalV-RbtaOc*i z>^Y`=p-PIUyPvoA?o!cfrMj2%#XMEw=h@)FJHigU5B$e>gKtZu>BvGxgDyax{U0z< z&q@D&z)mNz(A};e;Jy;m=_K&{#RlqkeiyaO||Fp$>CLc^XNRa@0_+?_(f{IS8JBW zGt%`lZ^nZHpFYHUxi;_8340;KVGLl}RSPBeW_DT%6@2RfP5+s@**gW0uJN9b8Onf8 z;eS*LM-jna5&RwwvjirNQV0nFl^uRkgWxe~(m*n*tz z3h78{5B{wP}`n+_e0p|m^~=|3PdtmEPh+l^$2 zngS7}__0T;98*LdWmOgY+`TrE2RvKsjb+9}Mdp6Sm6^{SXf+xZRa@5weCxRK2H~ZqGvSaL=yLy>}Cz_yMbC?ONHQ!b7J< zd}`c?x=&#(9P9zhy8H)K-vDM12uAzk%dL`*B9v|B4=tT24$;&$Wg;ZGEg&gL83(P_ zb5`>5RhmqtPnsPX^9r*6w1zst4?w6m>E1k#)^nGfofjg2ybDaivbjUh$LNyfEiN_( z(@p8lI8e|73qo>orbg|WHWI88hyw4K{&xvrdnH+S<)SetUD!Zf@wRumONYm%T zzX`ZRz(BZLPudm{UyT~50?9MG#gcT{=svwERZyNETVvO5IF4*zl*|%M*+pD*y|kQn0+;AfKJ4=OR~3lCtcWasXH2u-_8dtB4!`(k;l|+>dproQ&-a#Z zVt+)abez^33(WH!BV?Q&jPoYmX6e-yeTYwJlC4dM)rBpI)p} z1CqiU6=KTM2X9;U${ro2FDrwj?9!RY6=G}^y=AI@65eI8X;u@*ClPPmvljhz!kfD^ 
zO>nGPSa|hNZ^stCH>RGiE|HWw-v0#a#HOh|3EeCD3+J27@2hfB#JGH^vw=R7y32Q& z+U69HGLih8*pr9$>Mha#(iuyD$G61iso!Q{Ma1viA)5Pxy=)VI@Na2C{lt9bv^TBe zv;k}XE77D>&7(|JTHqt-nEvPr4yJTgd36qX>XSdc=3hMy-RiEtKrnghy}bVXv$a*E42|KMJA%4rOw%fY&%H6D;pA@Hk8-PXJQMvNj?1n45=FcT1{W+M?;iHZz zpiq9wuW)J*s48!9LU!ssS}upDvmn5-cBNz>esBi$;q{R0Aqp7$AOhp_BX^ zDc46^Ygx8dA?><>JvAmeI)>~Z9K;``2yXa#WQE(dRT?lw{n$Tjs49m>UJ+O+U(jZ` z2u=JJOBpfrQMPsY(`Gwc?XHu!oTT!t(pcGAGc!5(Ub^CU2=RaqX=I3RqoFsNb+(Gp zbAQ^*BU$<{%CS=PMe|qa*`-4-f+0OcMR}(sWg3pD#c{^`5~Zi^xCO>Nf~tYX`M8|s z!I(10fG9%MGPC-kXGb|XmWq$OvGAvY&+XSYZ1G>QFMB>xHvP4+QbJBcLugjb^Sej= zG0KA)9cq3uV;M3AjSS=!RTu_?IE0@Izb3hppV;|Mp=I-F%xI_epJaG&Mq%>Ec>Nxm zVR@cO`zDhcbx?H)hEisUY-W>v3TB7OS|Wj+ca$A6CtqF15CwUOOcp{a-l8iS2olX3 z&Y=fWCMJ0elV-eWd2SKD5onqx+0z*~or(y;LFumzYD{T(DB-`{mt#xGKV9##<8D|s zmg@tk0lpTpMf9p>ufkb~vP8?kyci;DAg(08pM#UT-B^`D>JonIS~Hwg5%T@G1}BTl zIT|yMJ2C(giOo9uBR+}p`dHMpo@dCzWu2b6*bBmghz`m_QPQAm)+nVQok~Hf>qE`! zH{vkwRt_}14|ma;_8YmqIAXv;0`Ue|xLxs{FKHhbu6Jg!kbyU=f7x$$&Dj*L{7121 zaFQt>?A6WYt}>O6y^s_Zqm#3ll(oHS>!j+KM?*iTu)Brd0m~o4nQN*2^KFN09^q4* z({eWMl6fq{ZB2W<_o-2?55tl;h#NPEC<&|BnxiD6X5NsaBiY|6elUa#R28~I-`!E3 zD2|e#eS0*p`s6vTmeM)jOAI|MZ=RM7JmgQvR2_T>M3pq9y_<~w@%T&im;WfqW6N}O zlm?B*n@6GaV7~s$h9lWNVN5L^f8uQjf;nBdFq7Pd@NLSDT1*U>Dhw8a$Co=p-YE_x zflQ3&Clspa;D7_V-Wc%pTdM2E<9z&FIF!aD_ql0U5BFO)oZr|1>KmIfqB3%qhO8_~ z+f*{IuoPP~krmZZQ*(1KcrNl7MKQPV!Rx3h;c-~k=~xRwduK8&cxWZa_Vkqz-TZT?npCU?)c~! 
z_}z?=b*bQ0q4sUD`6Y*~s9TNN%pMAUEL8F|!DoLHsBji2L`#x^W43D7*VG^wN%@Ejjjs?sE8FQKTRXFcK>FLudi8%Pz%n!cTF^8wBtS2fVr z?us&Btq4K6;(sfx%*nq`nq$-s=R`l_n1M#C^$fqyg*r`NjY}O^yx(0c2B*1aeLgn< zLJMwLVj7@z>6`DLGkG9iKW4(fHqjQ-1FND1&j&%x%KN;hPcy&#sDfDjl?%n}t1 ziGgPer2{VIZpbj|z+8lDiYXf?SO32C_O7;pKi=d`>iT5}5;A~ICW!vR3^BYjzBCW% zNvWOH$8Tldm4{lM>0(Qji#iL5%@^j=(6#dVmvfs>dj+fZ$M=lr(V!40#Ny&TcrtDA zz=DpYlyqcF{_qpRKH`(@RP_bBer$bZD5Wba*B+^eE-!60$KSjUozdq4t+AmOlwgb9O?SCSAS)f|Ab#JSF%EgxarZ7bb z#t<+#vyQV$EQQZRFE>9jTD~{D8@&}a-aU<|Frl%%sV}zJ#f^u*V2k&j9Q@(#`<8>O z*mAqL{wVjpEX6LiH{uY&SFxqbpsL2g(DwsEf>K zv7NnZY36s%YALPD0&lWDnSuNEA){(3B9o;=@$T6hMd+T^WjLOm!J=~wPD)TUzM?SH zHmCha^p9${V|#KhZz>U`YR3@nvx76XP}|QMZ5Z}~#k*N8*2?b&k&so^-T%KtwU%Ks=*CnY{?9`A>DuQhov)%{HHqZENd{Df;8V=BWpQ@H3i9v`sOJ*7xF z3@x?c1%(rdDd!sQMB+elFc;y#)cyjZj4ql z9=5x}E)woUG9R;~;NwTa0B=s<0$!+cy$Z;jgK09CW;R z<7kQ57lAH`hari(b}Wge=k2IjT3qq4%wSYK5M9F%4$O9zFh>e-pU+p#pVQs1CTd1> zBNgMR-m;D%j%G9#RHL*QCIHn1D#c`R!2`#eqGq__EKzAr_mV>zEzYsdG4{>^c=e2v z1j@*)BCh+jfybYSf|?j`Y#dT{sR>yDS(M~@*<$;leVdnn^)n|gVIj5y)KdeJ!ta?J zaaAyw30y7r$em3&d6_4vBI1%x`}u}i~+AEMxjvorIGPqBCO}3Vq%mv?0+2e|w zhOjn6RY;0+a4G`(sXK{>j(tDoJ`|uNeYC_`h&f7PQ9#TLx&7ARSZnB+l5OP^8h zJ|`KMx5@M!kO`>R0EE`zkkX|cu@+m?sAg!{Va{ggY5ka}V3DE95B4SsglvN$KajXn)FQRhYukaw5Td@~D3JEYrBB*0bEQ}R5G^Uq<*Sgtn z{#_s&{6dG`aR+O{j_D3J1ZF{K(u5CDcdm$fB=G0zUMSe3cm~mPXaZyAtaD0_RE$9p zdQr&f5{^L5W#Z*}rVPkC^g z@Kpvhe1P=$CwCa$ylOib@6_{-9Y5RjzF3VjxZKT00ZAP%|4nb3W&{}8$?yGbSNxMQ zByKlc=!%l3o*%PY!Hg7=Sm&#s7POi6J(LuD_Bk}#i>gd7xy^e8x4Z_hiX%C5s?~gR zFa*7D6Z(!Xsje2tZHm#X%+60a?0(^Mu5geil673OqM3?9ZQtgJB(d&oOWJ4O3sq+V zdNZBR8t6I~YQ80RK)uA%Xu@Qh0Ob7s_bK}O3%cRIKi)XeY7+fTP#3?5(?rXfp8p_F z-f3A@R@{D?Ra2Ih3VwvE|7Nepbgb(@K#f!py0_?7_}7jE3Ch_tmaozL>(Tn3ZF(qG zE-Vk-noW4na^wO`4^1mAU>aaMLW^deGgL`mD za0#%u1%fWHI0*^v?gV#-;O_2j2~L1T6Fd-Pad*3$laq7qd%t_1=iC2gW_!A;YO1>A z*WK!#Cz2Kcq7Fnx(@W}=5;QYQGIl;t4f`|vztruY3;$qzfD#4r|NT5a42Cr@x@cvq ziR+i1{?qW^3mDWIiBL}X|NWsKD!D0wHjPuFvjBg}fAaC)fmGnETmBCw|NBsu1Qv)w zVM;MRX8Qk#;_rX4240i?C7OR*{UfvQ3~yuFN1|) 
z76vc(gtX$P)~MlI!)(4T(`YBsRM6AMt$luHR{Tc1>Nj10bhOyBm;drRCl1Qc89Aej zj9>gbmZrr?Kl7IJIoB0=9EkF`SQP>saDN#3K80KmerWkHLX#1n2;#=aTNJ~~CU&fK zZPjhaDs#Sy*@#~1M#CF@M5Rp_+y?;wKyz_^J(2l`qsGllpl=T>X^T~_`PTG55Z|#Ef`&1%Kt93NxDj)K??JAng zkzMs#=+e3#~X1L$6-j z9+X7?T_ND7&T;>qoKZ}6joy*m0STMRonbT_!nwHqD|AgkWKP;WkZX1M%$zI0Qds+z zWq@2&+RC!O|H9;NtpQIVT*EF-N$F7jB07f`7p%l2cs3ye3Fa+>6i@}Axs>uhC>-b( z4H`U!8*qDMHGwvVV1_i^2z0(b*Q#AfXVuJ;oj5`(ki%SlscKgaA66q}GB;+~JctSp>0a~8L z+ZEX6@I%;>`L~xa%6-(gy9AiU?rtMir9H}%x(T;!J>as&HOG}YpzzPC9q4$byDa%!TLK;qr@ee(0 zUQgJ|3fb7=r*V5!pISZonBm`+1hWib>XftKdY>I}Fl&+3E|+N745`rp4;?r*_6->0 zmCfzed2o-j#pT>60I-{op+Rhru4`JD{-ER&Z-DjreyPGFwl6$*bK`aTWJnmtXJGoh zwa2sUcIy>%LgK{WQM`l3&eoDY!XI#zDMMEZshe4_Yp8kyr2KFS7&~#RtQi9vZQ=`eGsoVIV)XwDaZ>Xg6}P0#im{oZ z(_1z|(1|VWwnQM!Tge&WW+j2l?fZx#rvv-|@&;rGXzaO`U~EFmIIIpr=&tn$+d-bV zymV&0_PIZA>{7dgxLe3`S5Ethhzm}L>Y!i~ndQ;-7f(&7qaOtfzLt4<9lnrB%JPLr z?Uf1+@|-D~vWe$D+;R1sUQh^YDXefQ6MfTILYa&3kceW5zRlw22}1~b&#~oZbARUq zMX@OTEiY z54stdD&OaOF30|~TP5+u@2XXnGdo39O zhHLue#2Tjv6Ye-(Uu=N2u3d#;2UIyOG63iyIW(H>x&$!~Ny4%DE;}NFl91ckSnaEh zk3%+N5T2>}RvrLDaV@i+h}{0|_a+N@R)Md$%|r3^M~=vuC$sfZ`?KXkH;v^mCj@DP z%u#r_5c@J+eiqK!bDtjZ=VqM~u8WLsk&{YR^1iBkp!k?s;YNB)l+BN+#`vgRHsb0u zriCW6dZw=vR%TtmP!}CkUD*yj_;&P&_U!m;J#`}mA$b&vk6IczOY9V@J&w|s1(OkS zk?CBAIkA{I_4!^au@cpbv2n#alOqRy1xe&7H~qGUA6YpR8njc5pCF!Hp`uX_%F$C_ zk~|KU48y^Jz9dmQPvouaK7Jn3CQ{SfWc7ZAbay2$ue3U!=dCkp3S#%Q>EP&Ig$M3N zB})?(a(_HH&$A?wtG9l7f5AGeV4aR0N6azT_{rK|&K|OFK$PuXU6cxYZaVnITL9m{Spq?3eF7i2U0^<3E5YULCf6oZE>f|u~_ZLH+xPF z(5>+5rfYwYxk=kNIB6eHxwST*B30_qo*l&w_xsK{0{KVda}I7_e5gs)6OkNEnHvdy ze0NXymZx*n3@nt*6s4RGz;<@7K(C;W(<+ugD{k+9VSU4X$3rNL64z{_S<1^ji6#ef zW*A)PCl~l8qj$wjJvVQ{Mbto+hgwUH||x* zup}Hq)L*=6`f2%%V?3`#d5tf(IK1KN8Fz@%_lP6yeb6)2OmJPpDjh!i6Q*)tZGcr40Xsp212?@J^k9gX(s8h$hP019m^MeulQ zh+U^*S>Fk7r#BFKiuw;#gyqCH?%b0iOkAU1N8bU*y%(RYWxt`~e>AwXNPs$E@!6s% z8x<>N{3u?M-5(Xk52MscJy5p<{>WL0Ud5LOEG3>1^;-u)Vz8JG69VFjcX$rb%ay@0 zJlmpHm}(Ww@7!ob?_|Kj7+M0M!x&S`7|t$W3%b*N!Ptt7Z0;c^brNYbFB&kiQW29@ 
z)_hTn@_PY;Uf-`88fo{6)0oq>wB zeG(Zz9zo-3d&5AqT*=ThM(pR4ch2|Fv+CJ#coZ^{U12GEc#;mZmA~xTMTbLXjTJyFX76ca0qS^)9HeMB*9Hd&pc+$S9<2#652;@dhc;sWPQ= z(tD=wZj@P|$to?h+dg01r)3wB=POtE2&|mD=F8}!H9pGqm8`?6j76Q5d7^C9lPVk zhDGMVB3mJr`5T^2bWv?|5%PGWngJ-iAIfps-e;xDKDQ_YC}P%s6U;$eXgR6vzhcoq zGR36lJ;v@v^%B6LFN`N*awQvys@>9|mK=36ech0&~GsoQcN})}W04VXt z=K~?;Kn0F$%t;1p6yI<77)}kWc=h8i$%8^w=M>AefK?h-q2wyE25l_dFthMRP9%sx zPx~4rbvU}SBLY*c?6`a*8rZy3k&*J@V`ZU+@d)ox|9b)H)rTr;bEyw6u5S%9=HAQ? zMm?~u^Fr9XyrU(;fF1e1&-7$2Hg-jiuewpf2l7v+Iv=@)$)=v5J{RDA`H3cE7h0u3 zqut(dqmUSj#x^}E_|U_2kgTqyK>0YKWoof`?r{>$bef5u>f6pj`DCXosI-$`v|Ubqo?FY*y^{2L&JEG`J0XjubFI(iHtTOYjX%+(+^iPik@~ZIK~Ga zSrRFSbpcKGM*hx<`(8}YLH+g|VYSZq5#u=EOOw%U1}k2)92NhG!MKgwetVw~Zf6f9wjoJTxk1mrpSLE3gC!!ftx$4|>Ems)&~p zliMMW--Nst;hG_Zgddb_l&-+2%zd+9xJ8`oOK$FY#v>ib;kKm75}J0vv)~kCnv=;y zYF`gaA?wcaQSY6$!KAj{4yycVRk41ONo({)Pw!{ z&6DJo(>vsr@*3lcXrP$0WZo&f)>;9h<&r~`&0Fdj>&Pz?p$TUP4MCB8rOz8)-_gM9 zbmnX#>gcF|g`(q4Q$%B;Q`#jhR!K>+M8C7V+1CLXYK*-{5%Yan^4)?OTAbV#0jJ-yPbBiF$8143Prcv(rl zsZzmDr}FF1?Y*s7>UZBZ9V|?!cyAs*qZOO*wpi)*wKi2ehlYJftFe|Rv=@uf9ht$r zj_IJ6V+X__d5k0LtM9m(-Z299&dmicUN(22dUVUrmC~&Yj%yVPD-JMN zA%-cok{ElhbXemZ#H=y?O_ZB*q)NXhSpth`<#)`{gG&Q-oasKMqW=!2u;56QksUvV zuKQ&pUA^~t;x3?m>5*OBSM;PE(Aw9Q(kr&Rn2f?s*tp);Bq#{qxjwndT{bt4Hy$qG z;gzlW@T`i7=bV+rZsfoy@oHH9Xr_2&&W;VvPDK-(#+)#1xS~JtIy~P^@9Tb^4k^|T z?y$PcM1?yIiGEbf``XS!9m4tf9FO>;RLG!jpwt)#uF#XJWes~hTY}s$%t7!r#%EKv zlyFXO6=PTz66u3whrPqtGh2RobRFl`(j1o+3b7gO3_?$N3P65gxz0qETv^G%dXeE{TxM9JUTtRMMwx%!PHkDDq3KQ*-|%f zIu=W(ti+@krIV}A>?$~o>j=c=z16Ch2F|-9_eV?fCVfy64es-D*Zf2(-%S~vU%^(} ze!~l5D>A9p%irU#KnzHhlr2==r&B{%@UH;>LP$(kEohH)<()`E-O={=qG&|qcjU=X z7{;Zl;~^&$fP@nHP#NBRC7JTpVP~~rL8)3imhJpje+8h};S_LY72hsK$kOrUAiJmJ z$gQYgN9_dLH~$#qy!SKdMeT#6`DOY${bt#=m~>B0+2niu9FsW{@oBOEt&U14Y{(+w zc~d8PZAx`b=E11I%y^e{vU1jk#_0(7!9PhRGjq6mu(^32Tg+d+WjHG2kxh|M<0E}@ zrz{X1F<|cU8NZUl-5rTK7CfV%YUn=Llq)}Y68uKtW(HB_7$F0+QPBHtm~gsr!54FC z-PRjl_KU76cO0(iaOs__>tKReR@_9pip_JKnU}I9g7(nG^2iP8d_26-W8tUJVUubkO{i>WM34L 
zz)sxCm=GLNTQ3c^1E5pYI;5e;6jexPiz=0SyO)(@iR#I0J^jkaFNKk zAz~L_tIUWBH#LFjYRD@c<2ZbKH6~+fF*(qp?IF)dH$Lp@$?C*RCyDpW(p@>EkIJkt zr4#M_;F^thUoU0}N(RQ{pzKGzJB(0*+f-;cx3g8%P3OWs5_W#^YVNrz%^+a?vVEqL>wlkEBXQ`ox`b3T$jYq3q>8d4LOx)6VCzc;)s_oRp? zO#A*yxcI>9PkQ%<9o~@lyhsK^t<~@O;`$yn2Tn4-ux6U3^$_vHH^+!_M{x`D1!K;l zoZd*XaN$LGl5@q6%KnZ7=pcL9yP1pR^qdYU|$K39f!(Q1N(h&_G*Dta6gG@u2HWDXk*f6=!`fC=Z^$ zMCNLP7+igFdDr2$iOs^A;rHm`!1H$FQyP#3z5C3YU-WWVk_nu}vb7;8#!d|sGcWNr z_homJI1*Jxl6|5M!3I4F=)e?LmG+#hHWH92zjHvHBTI32jW&;n_HX?7@G0~ATf>+G zm(A2n85MUDZOR>VY2LD@`U*9Sh9$5<+a|I865)L&H_(8^LYH`WIb!)0G`(LXZccUe zeL0u#$JfY4vVQ*J7$#rGBhJn%gbdnNgz)i94w{+iPS@dW;DMr6zKNLtJ@6F^;mV>C zs(<)o6<**etDZb(^+(id+ccNb+#a>?)HvDud5ooN{Wsat`CU z)enT9aB|VmrcZvr|2!Q68IC!uW5dgMH9p5Qt|rP*w&YNGW@eoD=}BFr_CyRv_gsq+ zU5MCb?~sfl_#?~s%%n(MWB$iXF8JD!o9oRL5vhpSSYcA2sdv&L_N*50dek0G?oY1ufXp=@{B)J;0GFXZ|ZM^)|m=G>)|Ln?i^foB4l>e%(JCv-*e$nSxmvz0cIf} zN8^<`KmUy%ZF$i-fZ*pOqx0Qc(-ZUVpsDf|Xb5ckxt`VqJhrzd{8E-bVc`}_O036R zno`^v@Fb;#(SXp5UsqX_(IR6_Hu*GPZrF-8`YMQlZ#=n_0-fu_B1S_ZlMHpDs@J|j zo}!*o*~&xW@a7?Jaf}{K%XG$F!~(g%<*3I^l%+E?0~G{Z>hHYn@0y^#ywy5~B;>2E zUFo>QVJ>`Iz=Ykg{b?{Ar&Zn0uvmsxhpS1ZdBFNDY=NX0R)-9(QmCNAQdM~VL0$-Xj z$K}_=A%D}Ej>+h)t)8|MaXLBkIGO365<7PI8mEdeNl?_cL^VT(V5&nRDv&8(W z=|su3rquKXFjZd>PSA9qBFwoYXicNh7ZBfDsr-PTOmvqFkV zjO!=`PD{h_>>7N8N%1L@YubM1Fi5EKy);(qRrO8dCG^p(&`y%mf4lXH95Ur*9S-8kTc>oR!739d!%OvNDIdbp6y%^pDG$ov(j7%Rco1BQXxTZIA3|p3#h_Z69@&!GHc*lF1$MqT#T$s=d)n=@Px@;$i_wWOtP1B+o0dEZpa7Ya`RS`rB_w$m`B z-^?^#$&_f`Mn?CZs2-YVJE{khWw_E@!*d1oyLC;qg9j<>AG^BJ#rJSpp}PS>)MKZp zKht^e?**75$8KN?_!iYG@FI>Tq0srf?(CN~H_W=YCb~vo9)?q4f?Zgi*vQT+I@!WN z_3SPw%(}hgO;@?RB>zxNREL6e{iC)!4|7Dh^USx1?A#&|8RnJ{_<=Oaz>$;%orIp% z53ei+gnnvcCB57;l-eG>)1UW9e>5^8Q;EhZ>||3U{}2wGvj74`hIe~iFD2nBIWf}4 zrDT)#4B;LXO@XI`d6!VTj+LidXc zG2?fL9Bh18S{$TUX@d$M<~OzPE_ndu(Y04TDE1kAQtD22yn^YR3=*vM@nO@j9+4p% zvfa`|5G&G@P{vh88P1LAp`5`hGK4X-Su(vWWsUr%D2T7&mAbRT>E3ZnhZEtoXC^Gm zj5;%2$@#*VhsoLE#_{*cg!v~t)9CiW#tbx!ZdLq{T?d*3g0`d12xwl%b1|a{4cyg$b?(1+qi) 
z)YB6B1A;-+oQ{wSK~-&?t80}jH^p;%$3s&RNd}`#0MIVZ)feV??I=lSL5Yr`nrMET zPvJ-)PZ>+}Pj%hy8l^@8a$IXV{OeQnpU4E5oOI%N4}#W;4`KFWo$08!{eRlbMs^YJ z2JcKTAxsI5xxnkCf@*m$MwIBv_*p5Em`*mQe?f0w1nY6aJ&bcZrQP}iU!oPq#C}F3 zl(3=vE~*t0{?evXF77SDh##9PlZMzwEL-o*xm{D`=Y}b&M(3?Pf_)l?Eog(QkfY}J z=zyB2B(YU02l{=Mz`6zTuGb12zWB%!AL7;r)x7BEy7!r7IFH~9A#Ts^X;S3Prlvi{ zBJI&kJnMHu<|IorP%<8ogIHB}JSOT1_w{Wu-&ng3s?#Kj97K^jhPbjfEClH-4Im9Z zj_7kOiLG1W=w5%wji|}Rn{COsNL6`t=_@X_B@{Nj;v^?c(`p&^+B!1;O}yd7LOa1W zE>h0Bk-?E~A3hR|TF_u_^^58nn@Z=Kstgwt_d+~jDM))&n*jtVz~WfF8?yugiFvdU z55)!Dc(SuW>8YGY>52<2HfVe<#g!Px%4@RC_Ke6L#od8ht+b0no6gdlHm&YDN^4~9{W6p z!El>HN^n)LiwhSh^Qnba%REh{7jYUu3FZpQx}8`m?qq5Au=Fb(sO#nL-3o`o)TJp9 zmF0QkHFbl25{Q6Z;a0jc9j16MDQ#`2)Yp_cQ#6w~p}vunxH?8cd3Nn$KsS1+_WL$^ zX{3o3WT@%*zJ0^_cNN#44)Ckg55q;=`^$P=cc9Q;R^%`X=;XgUN_*CSAa2Qxlk){7 z3@gz&f88laA?sGuv96A6S-CeUJXmL194x``wlKgv{8sq|UO8p6`p+CWHs7I8HeV*z z=EijHHUjBP1gu8KT?>p(@vwg1+Nn4-nbEqTIRDGfzEFu*oVGfArTpo~mR7;6ElrVF z{g6ZM?#v1pG~4t~Rw(((TpBAt?P5A9m_IFtqR)-JerDUu=9WQN2q8x#+@}qH^BA^9 z<)bb#*ek^G*1+cgj$LS57H3uH&9eh!bLqE7aP^DBeY0xhh|0I63b}=Gm9Znt3mh;{ zw;`{Ct~;{xeMW(^0z~Y}aB5At0hY~~GUh>)ug)k*e9zun+Pzu&3~5JL>Cn`YrU(os z5@hBM;(WDp|KpMHTaVSpYR-Ya?5i755 z3al+Ws1O(V_a5umRJ>>!KxK`jl0c;w+wC$74uhq<*2@F&g!*&FE@>a{3ZUG+CV&JK z$(3Z}5At=g%gdMEjDkr#iIpU&47%#j^alwiB7KO-=6L#B#rPl+c{$h}Y2afNWn z`7HsNPtGvptBOaL76^qbiv!+%QWm+b?A9a38&k^pg4nZ|`{X>$ z@Ka@~&h09M)e`|JSUp2D>Gv)9%1QQ?*fPr``w*j5B)z#w8c z2doB$LZ-&~gzAJr$B5Tvpj<*TAyZ1+VQBJ$`-rpEmp5>Gm1bih^}_3N#u{6nsyFNn z-{ft1Rf8l|F?7-5+PqYgMdPeNA?4Vh#*HB zW@La#yt7i?iFGbxp-=D}nnE1Y-b_ebs;ug)RFnM!T7cN>Y>0TFS0NYpsXNrmw zKg%2kJ+a!z!0j0YP^(%VljiO_$;$u~DPp){Q{VkT%1*e#2*u%NCK9yNr5s<0%JSd6 zh8ex_KyPTwrJeC{Iy9IfOv^K}G9!_t%>;Z{XpC9F+IkrDA^f=g_*0Hx)D#-v{T0=l zy?~;qc%Kn6{+V(Aj`cC=s8wqn|TH#9KP58OW6bzplCA)(}9}}-`|7$ zi@=^W1}IL|vQn)$-Z-{A+eu6DRYzf+)1RSX(m`b%Y`?DYAHBTJQAP0ToMEus8YBD#Ra35Ei@!(eQ6I|I_#@Yzi~j>Pybd%4W_mz2u)j8f*deVM=D5 z5VZpTuF(92H~Y~Y=4UXagO1Vl*&=?^XyL_EdfK4%??(Qd0 +![torchrun diagram](./artifacts/torchrun.png) As a side effect of this 
structure, every process will run until (1) script completion or (2) another process stops communicating (e.g. if killed by the system for abnormal reasons). The status of other processes is not actively communicated: so if some process is indeed killed, it would take 10 minutes (by default) for the remaining processes to time-out. Also, since this approach parallelizes the entire script, we can't catch and handle these system-level issues as exceptions. `torchrunx` offers a functional interface, with a launcher–worker topology, instead. -> +![torchrunx diagram](./artifacts/torchrunx.png) {func}`torchrunx.Launcher.run` runs in the current, *launcher* process. It uses SSH to start an *agent* process on every node (specified in `hostnames`), which in turn spawn `M` *worker* processes. The workers form a distributed process group and each executes `func(*args, **kwargs)` in parallel. Once all workers are finished, all of their returned values are propagated to the initial launcher process. Our agents constantly communicate (over their own GLOO-backend distributed group), so any agent or worker failures are immediately propagated, and all launched processes are terminated. Worker exceptions and system failures are propagated to and raised by {func}`torchrunx.Launcher.run`. 
From 6075b0ca0f7d6865e510455105242e00946ed370 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 22 Feb 2025 12:29:55 -0500 Subject: [PATCH 126/141] updates to docs; cpu/gpu workers --- docs/source/features/cli.md | 7 +-- docs/source/features/customization.md | 35 ------------ .../features/{workflows.md => general.md} | 16 +++++- docs/source/features/logging.md | 19 +++++++ docs/source/index.rst | 4 +- src/torchrunx/launcher.py | 20 ++++--- src/torchrunx/utils/environment.py | 56 ++++++++++++------- 7 files changed, 84 insertions(+), 73 deletions(-) delete mode 100644 docs/source/features/customization.md rename docs/source/features/{workflows.md => general.md} (55%) create mode 100644 docs/source/features/logging.md diff --git a/docs/source/features/cli.md b/docs/source/features/cli.md index cae7ee98..40097ecc 100644 --- a/docs/source/features/cli.md +++ b/docs/source/features/cli.md @@ -1,4 +1,4 @@ -# CLI Integration +# From CLI Arguments We can automatically populate {mod}`torchrunx.Launcher` arguments using most CLI tools, e.g. [`tyro`](https://brentyi.github.io/tyro/) or any that [generate interfaces from dataclasses](https://brentyi.github.io/tyro/goals_and_alternatives). @@ -6,12 +6,9 @@ We can automatically populate {mod}`torchrunx.Launcher` arguments using most CLI import torchrunx import tyro -def distributed_function(): - ... - if __name__ == "__main__": launcher = tyro.cli(torchrunx.Launcher) - launcher.run(distributed_function) + results = launcher.run(...) ``` `python ... --help` then results in: diff --git a/docs/source/features/customization.md b/docs/source/features/customization.md deleted file mode 100644 index cdd95708..00000000 --- a/docs/source/features/customization.md +++ /dev/null @@ -1,35 +0,0 @@ -# Customization - -## Propagating exceptions - -Exceptions that are raised in workers will be raised by the launcher process. 
- -A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). - -## Environment variables - -Environment variables in the launcher process that match the `default_env_vars` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using `fnmatch`. - -`default_env_vars` can be overriden if desired. This list can be augmented using `extra_env_vars`. Additional environment variables (and more custom bash logic) can be included via the `env_file` argument. Our agents `source` this file. - -We also set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. - -## Logging - -We forward all logs (i.e. from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to files in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`) and are named by timestamp, hostname, and local_rank. - -{mod}`logging.Handler` objects can be provided via the `handler_factory` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of {mod}`logging.Handler`s to ``handler_factory``. - -We provide some utilities to help: - -```{eval-rst} -.. autofunction:: torchrunx.utils.file_handler -``` - -```{eval-rst} -.. autofunction:: torchrunx.utils.stream_handler -``` - -```{eval-rst} -.. 
autofunction:: torchrunx.utils.add_filter_to_handler -``` diff --git a/docs/source/features/workflows.md b/docs/source/features/general.md similarity index 55% rename from docs/source/features/workflows.md rename to docs/source/features/general.md index c6f82cea..c77e4a8b 100644 --- a/docs/source/features/workflows.md +++ b/docs/source/features/general.md @@ -1,4 +1,4 @@ -# Workflows +# General ## Multiple functions in one script @@ -45,3 +45,17 @@ for r in range(n_retries + 1): break ``` + +## Environment variables + +Environment variables in the launcher process that match the `default_env_vars` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using `fnmatch`. + +`default_env_vars` can be overriden if desired. This list can be augmented using `extra_env_vars`. Additional environment variables (and more custom bash logic) can be included via the `env_file` argument. Our agents `source` this file. + +We also set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. + +## Exceptions + +Exceptions that are raised in workers will be raised by the launcher process. + +A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). diff --git a/docs/source/features/logging.md b/docs/source/features/logging.md new file mode 100644 index 00000000..52e6128d --- /dev/null +++ b/docs/source/features/logging.md @@ -0,0 +1,19 @@ +# Custom Logging + +We forward all logs (i.e. from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's `stdout` stream. 
Logs from all agents and workers are written to files in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`) and are named by timestamp, hostname, and local_rank. + +{mod}`logging.Handler` objects can be provided via the `handler_factory` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of {mod}`logging.Handler`s to ``handler_factory``. + +We provide some utilities to help: + +```{eval-rst} +.. autofunction:: torchrunx.utils.file_handler +``` + +```{eval-rst} +.. autofunction:: torchrunx.utils.stream_handler +``` + +```{eval-rst} +.. autofunction:: torchrunx.utils.add_filter_to_handler +``` diff --git a/docs/source/index.rst b/docs/source/index.rst index 3097ce23..b6144716 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -12,9 +12,9 @@ :caption: Features :hidden: - ./features/customization.md - ./features/workflows.md + ./features/general.md ./features/cli.md + ./features/logging.md ./features/slurm.md .. toctree:: diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 3dba4c75..5a3c2c34 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -51,14 +51,15 @@ class Launcher: """For configuring the function launch environment.""" hostnames: list[str] | typing.Literal["auto", "slurm"] = "auto" - """Nodes on which to launch the function. By default, infer from localhost or SLURM.""" - workers_per_host: int | list[int] | typing.Literal["auto"] = "auto" + """Nodes to launch the function on. By default, infer from SLURM, else ``["localhost"]``.""" + workers_per_host: int | list[int] | typing.Literal["cpu", "gpu"] = "gpu" """Number of processes to run per node. By default, number of GPUs per host.""" ssh_config_file: str | os.PathLike | None = None """For connecting to nodes. 
By default, ``"~/.ssh/config"`` or ``"/etc/ssh/ssh_config"``.""" - backend: typing.Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None = "auto" + backend: typing.Literal["nccl", "gloo", "mpi", "ucc"] | None = "nccl" """`Backend `_ - for worker process group or ``None``. By default, NCCL if GPUs detected, else GLOO.""" + for worker process group. By default, NCCL (GPU backend). + Use GLOO for CPU backend. ``None`` for no process group.""" timeout: int = 600 """Worker process group timeout (seconds).""" copy_env_vars: tuple[str, ...] = DEFAULT_ENV_VARS_FOR_COPY @@ -80,10 +81,10 @@ def set_handler_factory( ) -> Self: """Provide a ``factory`` to set custom handling of agent and worker logs. - Parameters: - factory: Factory function to generate :obj:`logging.Handler` objects. + See `Custom Logging `_. - See `custom logging `_. + Parameters: + factory: Factory function used to generate :obj:`logging.Handler` objects. """ self.handler_factory = factory return self @@ -110,10 +111,11 @@ def run( # noqa: C901, PLR0912, PLR0915 ### - hostnames, workers_per_host, backend = resolve_environment( - self.hostnames, self.workers_per_host, self.backend, self.ssh_config_file + hostnames, workers_per_host = resolve_environment( + self.hostnames, self.workers_per_host, ssh_config_file=self.ssh_config_file ) ssh_config_file = self.ssh_config_file + backend = self.backend timeout = self.timeout env_vars = { diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index b5fd5fd2..792d8b02 100644 --- a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Literal, Union +from typing import Literal from typing_extensions import TypeAlias @@ -10,6 +10,7 @@ "auto_hosts", "build_launch_command", "execute_command", + "get_cpus_per_host", "get_gpus_per_host", "in_slurm_job", "slurm_hosts", @@ -27,15 +28,14 @@ Hostnames: TypeAlias = list[str] WorkersPerHost: TypeAlias = 
list[int] -Backend: TypeAlias = Union[Literal["nccl", "gloo", "mpi", "ucc"], None] def resolve_environment( hostnames: list[str] | Literal["auto", "slurm"], - workers_per_host: int | list[int] | Literal["auto"], - backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None, - ssh_config_file: str | os.PathLike | None, -) -> tuple[Hostnames, WorkersPerHost, Backend]: + workers_per_host: int | list[int] | Literal["cpu", "gpu"], + *, + ssh_config_file: str | os.PathLike | None = None, +) -> tuple[Hostnames, WorkersPerHost]: if hostnames == "auto": hostnames = auto_hosts() elif hostnames == "slurm": @@ -43,21 +43,17 @@ def resolve_environment( if isinstance(workers_per_host, int): workers_per_host = [workers_per_host] * len(hostnames) + elif workers_per_host == "cpu": + workers_per_host = get_cpus_per_host(hostnames, ssh_config_file=ssh_config_file) + elif workers_per_host == "gpu": + gpus_per_host: list[int] = get_gpus_per_host(hostnames, ssh_config_file=ssh_config_file) + if any(g == 0 for g in gpus_per_host): + hosts_without_gpus = [h for h, g in zip(hostnames, gpus_per_host) if g == 0] + msg = f'workers_per_host="gpu", but no GPUs detected on: {hosts_without_gpus}.' + raise RuntimeError(msg) + workers_per_host = gpus_per_host - if workers_per_host == "auto" or backend == "auto": - gpus_per_host: list[int] = get_gpus_per_host(hostnames, ssh_config_file) - gpus_on_every_host: bool = all(g > 0 for g in gpus_per_host) - - if workers_per_host == "auto": - if not gpus_on_every_host: - msg = 'workers_per_host="auto", but no GPUs detected on at least one host.' 
-                raise RuntimeError(msg)
-            workers_per_host = gpus_per_host
-
-        if backend == "auto":
-            backend = "nccl" if gpus_per_host else "gloo"
-
-    return hostnames, workers_per_host, backend
+    return hostnames, workers_per_host
 
 
 def auto_hosts() -> list[str]:
@@ -81,7 +77,25 @@ def slurm_hosts() -> list[str]:
     return subprocess.check_output(["scontrol", "show", "hostnames"]).decode().strip().split("\n")
 
 
-def get_gpus_per_host(hostnames: list[str], ssh_config_file: str | os.PathLike | None) -> list[int]:
+def get_cpus_per_host(
+    hostnames: list[str], *, ssh_config_file: str | os.PathLike | None = None
+) -> list[int]:
+    """Count the number of CPUs on each host."""
+    python = shlex.quote(sys.executable)
+    command = f"{python} -c \"import os; print(len(os.sched_getaffinity(0)), end='')\""
+    return [
+        int(
+            execute_command(
+                command, hostname, ssh_config_file=ssh_config_file, return_stdout_stderr=True
+            )[0]
+        )
+        for hostname in hostnames
+    ]
+
+
+def get_gpus_per_host(
+    hostnames: list[str], *, ssh_config_file: str | os.PathLike | None = None
+) -> list[int]:
     """Count the number of GPUs on each host."""
     python = shlex.quote(sys.executable)
     command = f"{python} -c \"import torch; print(torch.cuda.device_count(), end='')\""

From 9bcb7f2703cd071c14a9c3851acc970546858428 Mon Sep 17 00:00:00 2001
From: apoorvkh
Date: Sat, 22 Feb 2025 15:00:57 -0500
Subject: [PATCH 127/141] no propagate_exceptions option

---
 src/torchrunx/launcher.py | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py
index 5a3c2c34..83c5de71 100644
--- a/src/torchrunx/launcher.py
+++ b/src/torchrunx/launcher.py
@@ -69,8 +69,6 @@ class Launcher:
     """Additional environment variables to load onto workers."""
     env_file: str | os.PathLike | None = None
     """Path to a ``.env`` file, containing environment variables to load onto workers."""
-    propagate_exceptions: bool = True
-    """Whether to raise specific worker exceptions or 
:exc:`torchrunx.WorkerFailedError`.""" handler_factory: typing.Callable[[], list[logging.Handler]] | typing.Literal["auto"] | None = ( field(default="auto", init=False) @@ -89,7 +87,7 @@ def set_handler_factory( self.handler_factory = factory return self - def run( # noqa: C901, PLR0912, PLR0915 + def run( # noqa: C901, PLR0912 self, func: typing.Callable[FunctionP, FunctionR], *args: FunctionP.args, @@ -99,10 +97,8 @@ def run( # noqa: C901, PLR0912, PLR0915 Raises: RuntimeError: Configuration issues. - Exception: Exceptions raised in worker processes are propagated - (if ``propagate_exceptions=True``). - WorkerFailedError: If a worker fails (e.g. from a segmentation fault) - or raises an exception with ``propagate_exceptions=False``. + Exception: Exceptions raised in worker processes are propagated. + WorkerFailedError: If a worker fails (e.g. from a segmentation fault). AgentFailedError: If an agent fails, e.g. from an OS signal. """ if not dist.is_available(): @@ -127,7 +123,6 @@ def run( # noqa: C901, PLR0912, PLR0915 env_vars.update(self.extra_env_vars) env_file = self.env_file - propagate_exceptions = self.propagate_exceptions handler_factory = self.handler_factory ### @@ -218,9 +213,7 @@ def run( # noqa: C901, PLR0912, PLR0915 for s in agent_statuses: for v in s.return_values: if isinstance(v, ExceptionFromWorker): - if propagate_exceptions: - raise v.exception - raise WorkerFailedError from v.exception + raise v.exception if isinstance(v, WorkerFailedError): raise v From 7bdef69053822e6b8ba1351b7c388ed988dedf97 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 22 Feb 2025 15:12:40 -0500 Subject: [PATCH 128/141] moved resolution for log handlers --- src/torchrunx/launcher.py | 15 ++++++++++----- src/torchrunx/utils/logging.py | 22 ++++------------------ 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 83c5de71..e9d2c492 100644 --- a/src/torchrunx/launcher.py +++ 
b/src/torchrunx/launcher.py @@ -29,7 +29,7 @@ resolve_environment, ) from .utils.errors import ExceptionFromWorker, WorkerFailedError -from .utils.logging import LoggingServerArgs, start_logging_server +from .utils.logging import LoggingServerArgs, default_handlers, start_logging_server DEFAULT_ENV_VARS_FOR_COPY = ( "PATH", @@ -87,7 +87,7 @@ def set_handler_factory( self.handler_factory = factory return self - def run( # noqa: C901, PLR0912 + def run( # noqa: C901, PLR0912, PLR0915 self, func: typing.Callable[FunctionP, FunctionR], *args: FunctionP.args, @@ -123,7 +123,14 @@ def run( # noqa: C901, PLR0912 env_vars.update(self.extra_env_vars) env_file = self.env_file - handler_factory = self.handler_factory + if self.handler_factory is None: + + def handler_factory() -> list[logging.Handler]: + return [] + elif self.handler_factory == "auto": + handler_factory = partial(default_handlers, hostnames, workers_per_host) + else: + handler_factory = self.handler_factory ### @@ -158,8 +165,6 @@ def run( # noqa: C901, PLR0912 handler_factory=handler_factory, logging_hostname=launcher_hostname, logging_port=logging_port, - hostnames=hostnames, - workers_per_host=workers_per_host, ) stop_logging_event = Event() diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index f1399411..c98bd160 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -29,7 +29,7 @@ from multiprocessing.synchronize import Event as EventClass from pathlib import Path from socketserver import StreamRequestHandler, ThreadingTCPServer -from typing import Callable, Literal +from typing import Callable import cloudpickle from typing_extensions import Self @@ -187,11 +187,9 @@ def shutdown(self) -> None: class LoggingServerArgs: """Arguments for starting a :class:`_LogRecordSocketReceiver`.""" - handler_factory: Callable[[], list[Handler]] | Literal["auto"] | None + handler_factory: Callable[[], list[Handler]] logging_hostname: str logging_port: int - 
hostnames: list[str] - workers_per_host: list[int] def serialize(self) -> bytes: """Serialize :class:`LoggingServerArgs` for passing to a new process.""" @@ -203,23 +201,11 @@ def from_bytes(cls, serialized: bytes) -> Self: return cloudpickle.loads(serialized) -def start_logging_server( - serialized_args: bytes, - stop_event: EventClass, -) -> None: +def start_logging_server(serialized_args: bytes, stop_event: EventClass) -> None: """Serve :class:`_LogRecordSocketReceiver` until stop event triggered.""" args = LoggingServerArgs.from_bytes(serialized_args) - log_handlers = [] - if args.handler_factory is None: - log_handlers = [] - elif args.handler_factory == "auto": - log_handlers = default_handlers( - hostnames=args.hostnames, - workers_per_host=args.workers_per_host, - ) - elif isinstance(args.handler_factory, Callable): - log_handlers = args.handler_factory() + log_handlers = args.handler_factory() log_receiver = _LogRecordSocketReceiver( host=args.logging_hostname, From e3a62787a7e2bfb73eedb536149d889a368688e3 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sat, 22 Feb 2025 15:55:36 -0500 Subject: [PATCH 129/141] more updates to docs --- docs/source/features/general.md | 61 -------------------- docs/source/index.rst | 10 ++-- docs/source/{features => usage}/cli.md | 2 +- docs/source/usage/general.md | 67 ++++++++++++++++++++++ docs/source/{features => usage}/logging.md | 0 docs/source/{features => usage}/slurm.md | 2 +- 6 files changed, 74 insertions(+), 68 deletions(-) delete mode 100644 docs/source/features/general.md rename docs/source/{features => usage}/cli.md (96%) create mode 100644 docs/source/usage/general.md rename docs/source/{features => usage}/logging.md (100%) rename docs/source/{features => usage}/slurm.md (94%) diff --git a/docs/source/features/general.md b/docs/source/features/general.md deleted file mode 100644 index c77e4a8b..00000000 --- a/docs/source/features/general.md +++ /dev/null @@ -1,61 +0,0 @@ -# General - -## Multiple functions in 
one script - -We could also launch multiple functions (e.g. train on many GPUs, test on one GPU): - -```python -import torchrunx as trx - -trained_model = trx.launch( - func=train, - hostnames=["node1", "node2"], - workers_per_host=8 -).rank(0) - -accuracy = trx.launch( - func=test, - func_args=(trained_model,), - hostnames=["localhost"], - workers_per_host=1 -).rank(0) - -print(f'Accuracy: {accuracy}') -``` - -{mod}`torchrunx.launch` is self-cleaning: all processes are terminated (and the used memory is completely released) before the subsequent invocation. - -## Retries - -Sometimes distributed functions will fail randomly (OOM, networking, or resource errors), and should be executed again. Remember, {mod}`torchrunx.launch` will raise whatever exception its workers raise, so you can catch specific exceptions as you normally would. To retry launching a distributed function, we recommend doing the following: - -```python -import torchrunx as trx - -n_retries = 5 - -for r in range(n_retries + 1): - try: - trx.launch(train, hostnames=...) - except CudaOOMError: - print("retrying") - if r == n_retries: - raise Exception("maximum retries attempted") - else: - break - -``` - -## Environment variables - -Environment variables in the launcher process that match the `default_env_vars` argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. Environment variables are pattern-matched with this list using `fnmatch`. - -`default_env_vars` can be overriden if desired. This list can be augmented using `extra_env_vars`. Additional environment variables (and more custom bash logic) can be included via the `env_file` argument. Our agents `source` this file. - -We also set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. - -## Exceptions - -Exceptions that are raised in workers will be raised by the launcher process. 
- -A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). diff --git a/docs/source/index.rst b/docs/source/index.rst index b6144716..69c2d089 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -9,13 +9,13 @@ contributing .. toctree:: - :caption: Features + :caption: Usage :hidden: - ./features/general.md - ./features/cli.md - ./features/logging.md - ./features/slurm.md + ./usage/general.md + ./usage/cli.md + ./usage/logging.md + ./usage/slurm.md .. toctree:: :caption: Examples diff --git a/docs/source/features/cli.md b/docs/source/usage/cli.md similarity index 96% rename from docs/source/features/cli.md rename to docs/source/usage/cli.md index 40097ecc..5dd6c253 100644 --- a/docs/source/features/cli.md +++ b/docs/source/usage/cli.md @@ -1,4 +1,4 @@ -# From CLI Arguments +# From the CLI We can automatically populate {mod}`torchrunx.Launcher` arguments using most CLI tools, e.g. [`tyro`](https://brentyi.github.io/tyro/) or any that [generate interfaces from dataclasses](https://brentyi.github.io/tyro/goals_and_alternatives). diff --git a/docs/source/usage/general.md b/docs/source/usage/general.md new file mode 100644 index 00000000..35ae1a7f --- /dev/null +++ b/docs/source/usage/general.md @@ -0,0 +1,67 @@ +# General + +## Multiple functions in one script + +Consider multiple stages of training: pre-training, supervised fine-tuning, RLHF, etc. + +Normally, this kind of work is delegated to multiple scripts. Why? Each stage is complicated (prone to memory leaks) and we don't want them to interfere with each other. They may even require different degrees of parallelism. + +`torchrunx` solves these problems — even within a single script — by modularizing workloads into isolated, self-cleaning processes. 
+ +```python +# 2 nodes x 8 GPUs +train_launcher = torchrunx.Launcher(hostnames=["node1", "node2"], workers_per_host=8) +# 1 GPU +eval_launcher = torchrunx.Launcher(hostnames=["node1"], workers_per_host=1) + +# Training & testing + +pretrained_model = train_launcher.run(train).rank(0) +pretrained_acc = eval_launcher.run(evaluation, model=pretrained_model).rank(0) +print(f"Pre-trained model accuracy: {pretrained_acc}") + +finetuned_model = train_launcher.run(finetuning, model=pretrained_model).rank(0) +finetuned_acc = eval_launcher.run(evaluation, model=finetuned_model).rank(0) +print(f"Fine-tuned model accuracy: {finetuned_acc}") +``` + +## Exceptions + +Exceptions that are raised in workers will be raised by the launcher process. A {mod}`torchrunx.AgentFailedError` or {mod}`torchrunx.WorkerFailedError` will be raised if any agent or worker dies unexpectedly (e.g. if sent a signal from the OS, due to segmentation faults or OOM). + +You can catch these errors and handle them as you wish! + +```python +for config in configs: # e.g. hyper-parameter sweep + try: + Launcher().run(train, config) + except torch.cuda.OutOfMemoryError: + print(f"{config} results in OOM... continuing...") +``` + +If you are expecting intermittent failures, you can catch errors and invoke retries: + +```python +for retry in range(3): + try: + Launcher().run(train, resume_from_checkpoint=True) + except torchrunx.WorkerFailedError as e: + print(f"Error occurred: {e}") + print(f"Retrying ({retry}) ...") + else: + break +``` + +## Environment variables + +Environment variables in the launcher process that (Unix pattern) match the [``copy_env_vars``](../api.md#torchrunx.Launcher.copy_env_vars) argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. You could replace these. 
Or extend these like: + +```python +torchrunx.Launcher(copy_env_vars=( + torchrunx.DEFAULT_ENV_VARS_FOR_COPY + ("HF_HOME", "WANDB_*",) +)) +``` + +You can also pass (1) specific environment variables and values via [``extra_env_vars``](../api.md#torchrunx.Launcher.extra_env_vars) or (2) a ``.env``-style file via [``env_file``](../api.md#torchrunx.Launcher.env_file). Our agents `source {env_file}`. + +Finally, we set the following environment variables in each worker: `LOCAL_RANK`, `RANK`, `LOCAL_WORLD_SIZE`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT`. diff --git a/docs/source/features/logging.md b/docs/source/usage/logging.md similarity index 100% rename from docs/source/features/logging.md rename to docs/source/usage/logging.md diff --git a/docs/source/features/slurm.md b/docs/source/usage/slurm.md similarity index 94% rename from docs/source/features/slurm.md rename to docs/source/usage/slurm.md index bffc4512..a1628041 100644 --- a/docs/source/features/slurm.md +++ b/docs/source/usage/slurm.md @@ -1,4 +1,4 @@ -# SLURM Integration +# Using SLURM By default, the `hostnames` or `workers_per_host` arguments are populated from the current SLURM allocation. If no allocation is detected, we assume 1 machine (localhost) with N workers (num. GPUs or CPUs). Raises a `RuntimeError` if `hostnames="slurm"` or `workers_per_host="slurm"` but no allocation is detected. 
From c987259be238bb48ecacac82e4daf89291b7f162 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 23 Feb 2025 13:34:04 -0500 Subject: [PATCH 130/141] logging and slurm docs --- docs/source/usage/general.md | 2 +- docs/source/usage/logging.md | 24 +++++++++++-- docs/source/usage/slurm.md | 61 ++++++++++++++++++++++++++++++++-- src/torchrunx/launcher.py | 11 +++--- src/torchrunx/utils/logging.py | 17 +++------- 5 files changed, 92 insertions(+), 23 deletions(-) diff --git a/docs/source/usage/general.md b/docs/source/usage/general.md index 35ae1a7f..9626b7ca 100644 --- a/docs/source/usage/general.md +++ b/docs/source/usage/general.md @@ -54,7 +54,7 @@ for retry in range(3): ## Environment variables -Environment variables in the launcher process that (Unix pattern) match the [``copy_env_vars``](../api.md#torchrunx.Launcher.copy_env_vars) argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. You could replace these. Or extend these like: +Environment variables in the launcher process that pattern match the [``copy_env_vars``](../api.md#torchrunx.Launcher.copy_env_vars) argument are automatically copied to agents and workers. We set useful defaults for Python and PyTorch. You could replace these. Or extend these like: ```python torchrunx.Launcher(copy_env_vars=( diff --git a/docs/source/usage/logging.md b/docs/source/usage/logging.md index 52e6128d..14be733b 100644 --- a/docs/source/usage/logging.md +++ b/docs/source/usage/logging.md @@ -1,10 +1,12 @@ # Custom Logging -We forward all logs (i.e. from {mod}`logging` and {mod}`sys.stdout`/{mod}`sys.stderr`) from workers and agents to the launcher. By default, the logs from the first agent and its first worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to files in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`) and are named by timestamp, hostname, and local_rank. +We forward all worker and agent logs (i.e. 
from {mod}`logging`, {obj}`sys.stdout`, and {obj}`sys.stderr`) to the launcher for processing. -{mod}`logging.Handler` objects can be provided via the `handler_factory` argument to provide further customization (mapping specific agents/workers to custom output streams). You must pass a function that returns a list of {mod}`logging.Handler`s to ``handler_factory``. +By default, the logs from the rank 0 agent and worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to a directory (by the current timestamp) in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`). -We provide some utilities to help: +You can fully customize how logs are processed using {func}`torchrunx.Launcher.set_logging_handlers`. You should provide it a function that constructs and returns a list of {obj}`logging.Handler` objects. Each {obj}`logging.Handler` controls where logs should be written. + +We provide some handler utilities that direct a specified worker or agent's logs to a file or stream. ```{eval-rst} .. autofunction:: torchrunx.utils.file_handler @@ -14,6 +16,22 @@ We provide some utilities to help: .. autofunction:: torchrunx.utils.stream_handler ``` +For example, we could construct and pass a handler factory that streams the rank 0 agent and worker logs to the launcher's `stdout`. + +```python +def rank_0_handlers() -> list[logging.Handler]: + return [ + stream_handler(hostname=hostnames[0], local_rank=None), # agent 0 + stream_handler(hostname=hostnames[0], local_rank=0), # worker 0 + ] +``` + +```python +torchrunx.Launcher(...).set_logging_handlers(rank_0_handlers).run(...) +``` + +You can also [provide your own ``logging.Handler``](https://docs.python.org/3.9/library/logging.handlers.html#module-logging.handlers) and apply {func}`torchrunx.utils.add_filter_to_handler` to constrain which worker or agent's logs it should process. + ```{eval-rst} .. 
autofunction:: torchrunx.utils.add_filter_to_handler ``` diff --git a/docs/source/usage/slurm.md b/docs/source/usage/slurm.md index a1628041..3483f4f2 100644 --- a/docs/source/usage/slurm.md +++ b/docs/source/usage/slurm.md @@ -1,4 +1,61 @@ # Using SLURM -By default, the `hostnames` or `workers_per_host` arguments are populated from the current SLURM allocation. If no allocation is detected, we assume 1 machine (localhost) with N workers (num. GPUs or CPUs). -Raises a `RuntimeError` if `hostnames="slurm"` or `workers_per_host="slurm"` but no allocation is detected. +Normally, you are expected to provide the `hostnames` argument in {obj}`torchrunx.Launcher` to specify which nodes you would like to launch your function onto. + +If your script is running within a SLURM allocation and you set `hostnames` to `"auto"` (default) or `"slurm"`, we will automatically detect the available nodes and distribute your function onto all of these. A {exc}`RuntimeError` will be raised if `hostnames="slurm"` but no SLURM allocation is detected. + +## With `sbatch` + +You could have a script (`train.py`) that includes: + +```python +def distributed_training(): + ... + +if __name__ == "__main__": + torchrunx.Launcher( + # optionally specify: + # hostnames = "slurm", + # workers_per_host = "gpu" + ).run(distributed_training) +``` + +And some `run.batch` file (e.g. allocating 2 nodes with 2 GPUs each): + +```bash +#!/bin/bash +#SBATCH --job-name=torchrunx +#SBATCH --time=1:00:00 +#SBATCH --ntasks-per-node=1 +#SBATCH --nodes=2 +#SBATCH --gpus-per-node=2 + +# TODO: load your virtual environment +python train.py +``` + +`sbatch run.batch` should then run `python train.py` (the launcher process) on the primary machine in your SLURM allocation. The launcher will automatically distribute the training function onto both allocated nodes (and also parallelize it across the allocated GPUs).
+ +## With `submitit` + +If we use the [`submitit`](https://github.com/facebookincubator/submitit) Python library, we can do all of this from a single python script. + +```python +def distributed_training(): + ... + +def launch_training(): + torchrunx.Launcher( + # optionally specify: + # hostnames = "slurm", + # workers_per_host = "gpu" + ).run(distributed_training) + +if __name__ == "__main__": + executor = submitit.SlurmExecutor(folder="slurm_outputs") + executor.update_parameters( + use_srun=False, time=60, ntasks_per_node=1, + nodes=2, gpus_per_node=2 + ) + executor.submit(launch_training) +``` diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index e9d2c492..a6edbce5 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -74,17 +74,18 @@ class Launcher: field(default="auto", init=False) ) - def set_handler_factory( - self, factory: typing.Callable[[], list[logging.Handler]] | typing.Literal["auto"] | None + def set_logging_handlers( + self, + handler_factory: typing.Callable[[], list[logging.Handler]] | typing.Literal["auto"] | None, ) -> Self: - """Provide a ``factory`` to set custom handling of agent and worker logs. + """Provide a ``handler_factory`` function to customize processing of agent/worker logs. See `Custom Logging `_. Parameters: - factory: Factory function used to generate :obj:`logging.Handler` objects. + handler_factory: Function that constructs and returns :obj:`logging.Handler` objects. 
""" - self.handler_factory = factory + self.handler_factory = handler_factory return self def run( # noqa: C901, PLR0912, PLR0915 diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logging.py index c98bd160..71c5ea49 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logging.py @@ -43,14 +43,7 @@ def add_filter_to_handler( local_rank: int | None, # None indicates agent log_level: int = logging.NOTSET, ) -> None: - """Apply a filter to :mod:`logging.Handler` so only specific worker logs are handled. - - Args: - handler: Handler to be modified. - hostname: Name of specified host. - local_rank: Rank of specified worker on host (or ``None`` for agent itself). - log_level: Minimum log level to capture. - """ + """Apply an agent- or worker- specific filter to :obj:`logging.Handler`.""" def _filter(record: WorkerLogRecord) -> bool: return ( @@ -67,11 +60,11 @@ def default_handlers( workers_per_host: list[int], log_level: int = logging.INFO, ) -> list[logging.Handler]: - """Default :mod:`logging.Handler`s for ``log_handlers="auto"`` in :mod:`torchrunx.launch`. + """Constructs default :obj:`logging.Handler` objects. - Logs for ``host[0]`` and its ``local_rank[0]`` worker are written to launcher process stdout. - Logs for all agents/workers are written to files in ``log_dir`` (named by timestamp, hostname, - local_rank). + Logs for the rank 0 agent and worker are written to launcher process stdout. + Logs for all hosts/workers are written to files in ``$TORCHRUNX_LOG_DIR`` (named by timestamp, + hostname, local_rank). 
""" log_dir = Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")) log_level = logging._nameToLevel[os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO")] # noqa: SLF001 From 5a8845281b938dc6546e6c3d273ee3b524ad41f3 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 23 Feb 2025 15:43:51 -0500 Subject: [PATCH 131/141] torchrunx.__version__ --- src/torchrunx/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/torchrunx/__init__.py b/src/torchrunx/__init__.py index 0342f4b6..299f7626 100644 --- a/src/torchrunx/__init__.py +++ b/src/torchrunx/__init__.py @@ -1,6 +1,10 @@ +import importlib.metadata + from .launcher import DEFAULT_ENV_VARS_FOR_COPY, Launcher, LaunchResult from .utils.errors import AgentFailedError, WorkerFailedError +__version__ = importlib.metadata.version(__package__ or __name__) + __all__ = [ # noqa: RUF022 "DEFAULT_ENV_VARS_FOR_COPY", "Launcher", From 1584c9ca11ad3b8294deb87ef53ca5b2ed2fe81e Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 23 Feb 2025 16:06:16 -0500 Subject: [PATCH 132/141] addl CLI parsing --- docs/source/artifacts/accelerate_help.txt | 61 ++++---- docs/source/artifacts/argparse_cli_help.txt | 30 ++++ docs/source/artifacts/deepspeed_help.txt | 62 +++++---- docs/source/artifacts/lightning_help.txt | 55 +++++--- docs/source/artifacts/transformers_help.txt | 42 +++--- .../{cli_help.txt => tyro_cli_help.txt} | 18 ++- docs/source/usage/cli.md | 29 +++- scripts/examples/accelerate_train.py | 1 - scripts/examples/deepspeed_train.py | 14 ++ scripts/generate_help_menus.sh | 11 +- src/torchrunx/integrations/parsing.py | 130 ++++++++++++++++++ 11 files changed, 352 insertions(+), 101 deletions(-) create mode 100644 docs/source/artifacts/argparse_cli_help.txt rename docs/source/artifacts/{cli_help.txt => tyro_cli_help.txt} (75%) create mode 100644 src/torchrunx/integrations/parsing.py diff --git a/docs/source/artifacts/accelerate_help.txt b/docs/source/artifacts/accelerate_help.txt index 31e3b0b1..4af2d2ec 100644 --- 
a/docs/source/artifacts/accelerate_help.txt +++ b/docs/source/artifacts/accelerate_help.txt @@ -1,41 +1,56 @@ usage: accelerate_train.py [-h] [OPTIONS] ╭─ options ──────────────────────────────────────────────────────────────────╮ -│ -h, --help show this help message and exit │ -│ --batch-size INT (required) │ -│ --output-dir PATH (required) │ +│ -h, --help │ +│ show this help message and exit │ +│ --batch-size INT │ +│ (required) │ +│ --output-dir PATH │ +│ (required) │ ╰────────────────────────────────────────────────────────────────────────────╯ ╭─ launcher options ─────────────────────────────────────────────────────────╮ -│ Useful for sequential invocations or for specifying arguments via CLI. │ +│ For configuring the function launch environment. │ │ ────────────────────────────────────────────────────────────────────────── │ │ --launcher.hostnames {[STR [STR ...]]}|{auto,slurm} │ -│ (default: auto) │ -│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{auto,slurm} │ -│ (default: auto) │ +│ Nodes to launch the function on. By default, infer from SLURM, else │ +│ ``["localhost"]``. (default: auto) │ +│ --launcher.workers-per-host INT|{[INT [INT ...]]}|{cpu,gpu} │ +│ Number of processes to run per node. By default, number of GPUs per │ +│ host. (default: gpu) │ │ --launcher.ssh-config-file {None}|STR|PATHLIKE │ -│ (default: None) │ -│ --launcher.backend {None,nccl,gloo,mpi,ucc,auto} │ -│ (default: auto) │ -│ --launcher.timeout INT (default: 600) │ -│ --launcher.default-env-vars [STR [STR ...]] │ -│ (default: PATH LD_LIBRARY LIBRARY_PATH 'PYTHON*' │ -│ 'CUDA*' 'TORCH*' 'PYTORCH*' 'NCCL*') │ -│ --launcher.extra-env-vars [STR [STR ...]] │ -│ (default: ) │ +│ For connecting to nodes. By default, ``"~/.ssh/config"`` or │ +│ ``"/etc/ssh/ssh_config"``. 
(default: None) │ +│ --launcher.backend {None,nccl,gloo,mpi,ucc} │ +│ `Backend │ +│ docs/source/artifacts/cli_help.txt +uv run python -c "from argparse import ArgumentParser; from torchrunx.integrations.parsing import add_torchrunx_argument_group; parser = ArgumentParser(); add_torchrunx_argument_group(parser); parser.parse_args()" --help > docs/source/artifacts/argparse_cli_help.txt +uv run --with tyro python -c "import torchrunx; import tyro; tyro.cli(torchrunx.Launcher)" --help > docs/source/artifacts/tyro_cli_help.txt -uv run scripts/examples/transformers_train.py --help > docs/source/artifacts/transformers_help.txt -uv run scripts/examples/deepspeed_train.py --help > docs/source/artifacts/deepspeed_help.txt -uv run scripts/examples/lightning_train.py --help > docs/source/artifacts/lightning_help.txt -uv run scripts/examples/accelerate_train.py --help > docs/source/artifacts/accelerate_help.txt +uv run --with . scripts/examples/transformers_train.py --help > docs/source/artifacts/transformers_help.txt +uv run --with . scripts/examples/deepspeed_train.py --help > docs/source/artifacts/deepspeed_help.txt +uv run --with . scripts/examples/lightning_train.py --help > docs/source/artifacts/lightning_help.txt +uv run --with . 
scripts/examples/accelerate_train.py --help > docs/source/artifacts/accelerate_help.txt diff --git a/src/torchrunx/integrations/parsing.py b/src/torchrunx/integrations/parsing.py new file mode 100644 index 00000000..7554219e --- /dev/null +++ b/src/torchrunx/integrations/parsing.py @@ -0,0 +1,130 @@ +"""Utilities for building a Launcher from argparse command-line arguments.""" + +from __future__ import annotations + +__all__ = ["add_torchrunx_argument_group", "launcher_from_args"] + +from argparse import ArgumentParser, Namespace +from typing import Literal + +from torchrunx import DEFAULT_ENV_VARS_FOR_COPY, Launcher + + +def add_torchrunx_argument_group(parser: ArgumentParser) -> None: + """Add an argument group for torchrunx.Launcher to an ArgumentParser.""" + group = parser.add_argument_group("torchrunx") + + group.add_argument( + "--hostnames", + type=str, + nargs="+", + default="auto", + help="Nodes to launch the function on. Default: 'auto'. Use 'slurm' to infer from SLURM.", + ) + + group.add_argument( + "--workers-per-host", + type=str, + nargs="+", + default="gpu", + help="Processes to run per node. Can be 'cpu', 'gpu', or list[int]. Default: 'gpu'.", + ) + + group.add_argument( + "--ssh-config-file", + type=str, + default=None, + help="Path to SSH config file. Default: '~/.ssh/config' or '/etc/ssh/ssh_config'.", + ) + + group.add_argument( + "--backend", + type=str, + choices=["nccl", "gloo", "mpi", "ucc", "None"], + default="nccl", + help="For worker process group. Default: 'nccl'. Use 'gloo' for CPU. 'None' to disable.", + ) + + group.add_argument( + "--timeout", + type=int, + default=600, + help="Worker process group timeout in seconds. Default: 600.", + ) + + group.add_argument( + "--copy-env-vars", + type=str, + nargs="+", + default=DEFAULT_ENV_VARS_FOR_COPY, + help="Environment variables to copy to workers. 
Supports Unix pattern matching.", + ) + + group.add_argument( + "--extra-env-vars", + type=str, + nargs="*", + default=None, + help="Additional environment variables as key=value pairs.", + ) + + group.add_argument( + "--env-file", type=str, default=None, help="Path to a .env file with environment variables." + ) + + +def launcher_from_args(args: Namespace) -> Launcher: + """Create a torchrunx.Launcher from argparse.Namespace.""" + _hostnames: list[str] = args.hostnames + hostnames: list[str] | Literal["auto", "slurm"] + if _hostnames == ["auto"]: + hostnames = "auto" + elif _hostnames == ["slurm"]: + hostnames = "slurm" + else: + hostnames = _hostnames + + _workers_per_host: list[str] = args.workers_per_host + workers_per_host: int | list[int] | Literal["cpu", "gpu"] + + if _workers_per_host == ["cpu"]: + workers_per_host = "cpu" + elif _workers_per_host == ["gpu"]: + workers_per_host = "gpu" + elif len(_workers_per_host) == 1: + workers_per_host = int(_workers_per_host[0]) + else: + workers_per_host = [int(w) for w in _workers_per_host] + + ssh_config_file: str | None = args.ssh_config_file + + _backend: str = args.backend + backend: Literal["nccl", "gloo", "mpi", "ucc"] | None + if _backend == "None": # noqa: SIM108 + backend = None + else: + backend = _backend # pyright: ignore [reportAssignmentType] + + timeout: int = args.timeout + + copy_env_vars: tuple[str, ...] 
= tuple(args.copy_env_vars) + + _extra_env_vars: list[str] | None = args.extra_env_vars + extra_env_vars: dict[str, str] | None + if _extra_env_vars is not None: + extra_env_vars = dict(var.split("=", 1) for var in _extra_env_vars) + else: + extra_env_vars = None + + env_file: str | None = args.env_file + + return Launcher( + hostnames=hostnames, + workers_per_host=workers_per_host, + ssh_config_file=ssh_config_file, + backend=backend, + timeout=timeout, + copy_env_vars=copy_env_vars, + extra_env_vars=extra_env_vars, + env_file=env_file, + ) From 889f3ef2f41cea3d050f6f46b5c27d484ca9544b Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 23 Feb 2025 16:56:31 -0500 Subject: [PATCH 133/141] updated README --- README.md | 55 +++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 6cdb01ea..af61e194 100644 --- a/README.md +++ b/README.md @@ -15,17 +15,30 @@ By [Apoorv Khandelwal](https://apoorvkh.com) and [Peter Curtin](https://github.c **`torchrunx`** is a *functional* utility for distributing PyTorch code across devices. This is a [more convenient, robust, and featureful](#torchrunx-uniquely-offers) alternative to CLI-based launchers, like `torchrun`, `accelerate launch`, and `deepspeed`. +It enables complex workflows within a single script and has useful features even if only using 1 GPU. + ```bash pip install torchrunx ``` -Requires: Linux (+ SSH & shared filesystem if using multiple machines) +Requires: +- Linux +- If using multiple machines: SSH & shared filesystem --- -**Vanilla Example: Training a model on 2 machines with 2 GPUs each** +**Dummy example: parallelizing training with `torchrunx`** + +```python +def distributed_training(model: nn.Module, num_steps: int) -> nn.Module: + # Environment variables: RANK, LOCAL_RANK, ... + # ddp_model = DistributedDataParallel(model, device_ids=[local_rank]) + ... + return trained_model +``` -
+Implementation of

distributed_training

(click to expand)
```python from __future__ import annotations @@ -33,7 +46,7 @@ import os import torch import torch.nn as nn -def train(model: nn.Module, num_steps: int = 5) -> nn.Module | None: +def distributed_training(model: nn.Module, num_steps: int = 10) -> nn.Module | None: rank = int(os.environ['RANK']) local_rank = int(os.environ['LOCAL_RANK']) @@ -41,7 +54,7 @@ def train(model: nn.Module, num_steps: int = 5) -> nn.Module | None: ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) optimizer = torch.optim.AdamW(ddp_model.parameters()) - for step in range(10): + for step in range(num_steps): optimizer.zero_grad() inputs = torch.randn(5, 10).to(local_rank) @@ -55,31 +68,41 @@ def train(model: nn.Module, num_steps: int = 5) -> nn.Module | None: return model.cpu() ``` -Launching training with `torchrunx`: +
```python import torchrunx -results = torchrunx.Launcher( +# Launch training on 2 machines x 2 GPUs + +launcher = torchrunx.Launcher( hostnames = ["localhost", "second_machine"], workers_per_host = 2 -).run( - train, +) + +results = launcher.run( + distributed_training, model = nn.Linear(10, 10), num_steps = 10 ) +``` +```python +# get the results trained_model: nn.Module = results.rank(0) +# or: results.index(hostname="localhost", local_rank=0) + +# and continue your script — e.g. save model to checkpoint torch.save(trained_model.state_dict(), "output/model.pth") ``` -**See examples where we fine-tune LLMs (e.g. GPT-2 on WikiText) using:** +**See examples where we fine-tune LLMs using:** - [Transformers](https://torchrun.xyz/examples/transformers.html) - [DeepSpeed](https://torchrun.xyz/examples/deepspeed.html) - [PyTorch Lightning](https://torchrun.xyz/examples/lightning.html) - [Accelerate](https://torchrun.xyz/examples/accelerate.html) -**Refer to our [API](https://torchrun.xyz/api.html) and [Advanced Usage Guide](https://torchrun.xyz/advanced.html) for many more capabilities!** +**Refer to our [API](https://torchrun.xyz/api.html) and [Usage](https://torchrun.xyz/usage/general.html) for many more capabilities!** --- @@ -98,7 +121,7 @@ torch.save(trained_model.state_dict(), "output/model.pth") 3. **Support for more complex workflows in a single script** 🎛️ -> Your workflow may have independent steps that need different parallelizations (e.g. training on 8 GPUs, testing on 1 GPU; comparing throughput on 4, then 8 GPUs; and so forth). CLI-based launchers naively parallelize the entire script for exactly *N* GPUs. In contrast, our library treats these steps in a modular way and permits *degrees* of parallelism in a single script. +> Your workflow may have steps that are complex (e.g. pre-train, fine-tune, test) or may need different parallelizations (e.g. training on 8 GPUs, testing on 1 GPU).
In these cases, CLI-based launchers require each step to live in its own script. Our library treats these steps in a modular way, so they can cleanly fit together in a single script! > > > We clean memory leaks as we go, so previous steps won't crash or adversely affect future steps. @@ -112,10 +135,10 @@ torch.save(trained_model.state_dict(), "output/model.pth") 5. **Bonus features** 🎁 -> - Typing for function arguments and return values. -> - Custom, fine-grained handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! -> - No need to manually set up [`dist.init_process_group`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) -> - Automatic detection of SLURM environments. +> - Return objects from distributed functions. +> - [Automatic detection of SLURM environments.](https://torchrun.xyz/usage/slurm.html) > - Start multi-node training from Python notebooks! +> - Our library is fully typed! +> - Custom, fine-grained handling of logging, environment variables, and exception propagation. We have nice defaults too: no more interleaved logs and irrelevant exceptions! **On our [roadmap](https://github.com/apoorvkh/torchrunx/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement): higher-order parallelism, support for debuggers, and more!** From 510a8811b469b2706a6294a9e15f90004578d78d Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 23 Feb 2025 17:12:41 -0500 Subject: [PATCH 134/141] update readme --- README.md | 2 +- docs/conf.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index af61e194..22b42bab 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ def distributed_training(model: nn.Module, num_steps: int) -> nn.Module: ```
-Implementation of

distributed_training

(click to expand)
+Implementation of distributed_training (click to expand) ```python from __future__ import annotations diff --git a/docs/conf.py b/docs/conf.py index 355dd892..857a8c4f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,9 +1,15 @@ """Configuration file for the Sphinx documentation builder.""" from glob import glob import os +import re import shutil shutil.copyfile("../README.md", "source/README.md") +readme_f_str = open("source/README.md", "r").read() +readme_f_str = readme_f_str.replace("", '

').replace("", "

") +readme_f_str = re.sub(r"https://torchrun\.xyz/(.+?)\.html", r"./\1.md", readme_f_str) +open("source/README.md", "w").write(readme_f_str) + shutil.copyfile("../CONTRIBUTING.md", "source/contributing.md") os.makedirs("source/examples/scripts", exist_ok=True) From 128eaf4c058f8f0a1c126c92f29722b87c46b0a6 Mon Sep 17 00:00:00 2001 From: "peter_curtin@brown.edu" Date: Mon, 24 Feb 2025 12:46:53 -0500 Subject: [PATCH 135/141] fix agent loggin, basic logging in agent and launcher. --- src/torchrunx/__main__.py | 12 +++--- src/torchrunx/agent.py | 61 ++++++++++++++++++++---------- src/torchrunx/launcher.py | 21 ++++++++++ src/torchrunx/utils/comm.py | 14 +++++++ src/torchrunx/utils/environment.py | 16 ++++++-- tests/test_func.py | 4 +- 6 files changed, 96 insertions(+), 32 deletions(-) diff --git a/src/torchrunx/__main__.py b/src/torchrunx/__main__.py index 8626c1f8..2cf981a7 100644 --- a/src/torchrunx/__main__.py +++ b/src/torchrunx/__main__.py @@ -3,7 +3,7 @@ from argparse import ArgumentParser from .agent import main -from .utils.comm import LauncherAgentGroup +from .utils.comm import AgentCliArgs if __name__ == "__main__": parser = ArgumentParser() @@ -12,17 +12,17 @@ parser.add_argument("--logger-port", type=int) parser.add_argument("--world-size", type=int) parser.add_argument("--rank", type=int) + parser.add_argument("--hostname", type=str) args = parser.parse_args() - launcher_agent_group = LauncherAgentGroup( + agent_args = AgentCliArgs( launcher_hostname=args.launcher_hostname, launcher_port=args.launcher_port, world_size=args.world_size, rank=args.rank, - ) - - main( - launcher_agent_group, logger_hostname=args.launcher_hostname, logger_port=args.logger_port, + hostname=args.hostname, ) + + main(agent_args) diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index d21dbf3a..cb7f460a 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -14,6 +14,7 @@ import torch.distributed.elastic.multiprocessing as dist_mp from .utils.comm import ( + 
AgentCliArgs, AgentPayload, AgentStatus, LauncherAgentGroup, @@ -23,7 +24,9 @@ from .worker import WorkerArgs, worker_entrypoint -def main(launcher_agent_group: LauncherAgentGroup, logger_hostname: str, logger_port: int) -> None: +def main( + agent_args: AgentCliArgs, +) -> None: """Main function for agent processes (started on each node). This function spawns local worker processes (which run the target function). All agents monitor @@ -31,14 +34,40 @@ def main(launcher_agent_group: LauncherAgentGroup, logger_hostname: str, logger_ with each other (and launcher). All agents terminate if failure occurs in any agent. Arguments: - launcher_agent_group: The communication group between launcher and all agents. - logger_hostname: The hostname of the launcher (for logging). - logger_port: The port of the launcher (for logging). + agent_args: Command line arugments provided to the agent at launch. """ + # Stream logs to logging server + + logger = logging.getLogger() + redirect_stdio_to_logger(logger) + + log_records_to_socket( + logger=logger, + hostname=agent_args.hostname, + local_rank=None, + logger_hostname=agent_args.logger_hostname, + logger_port=agent_args.logger_port, + ) + + logging.debug("Agent logging setup.") + + # Set up launcher-agent group + + logging.debug("Initializing launcher-agent group.") + + launcher_agent_group = LauncherAgentGroup( + launcher_hostname=agent_args.launcher_hostname, + launcher_port=agent_args.launcher_port, + world_size=agent_args.world_size, + rank=agent_args.rank, + ) + agent_rank = launcher_agent_group.rank - 1 # Communicate initial payloads between launcher/agents + logging.debug("Sending agent details to launcher.") + payload = AgentPayload( hostname=socket.getfqdn(), port=get_open_port(), @@ -53,22 +82,10 @@ def main(launcher_agent_group: LauncherAgentGroup, logger_hostname: str, logger_ worker_global_ranks = launcher_payload.worker_global_ranks[agent_rank] num_workers = len(worker_global_ranks) - # Stream logs to logging 
server - - logger = logging.getLogger() - - log_records_to_socket( - logger=logger, - hostname=hostname, - local_rank=None, - logger_hostname=logger_hostname, - logger_port=logger_port, - ) - - redirect_stdio_to_logger(logger) - # Spawn worker processes + logging.debug("Launching worker processes.") + ctx = dist_mp.start_processes( name=f"{hostname}_", entrypoint=worker_entrypoint, @@ -76,8 +93,8 @@ def main(launcher_agent_group: LauncherAgentGroup, logger_hostname: str, logger_ i: ( WorkerArgs( function=launcher_payload.fn, - logger_hostname=logger_hostname, - logger_port=logger_port, + logger_hostname=agent_args.logger_hostname, + logger_port=agent_args.logger_port, main_agent_hostname=main_agent_payload.hostname, main_agent_port=main_agent_payload.port, backend=launcher_payload.backend, @@ -118,8 +135,12 @@ def main(launcher_agent_group: LauncherAgentGroup, logger_hostname: str, logger_ all_done = all(s.state == "done" for s in agent_statuses) any_failed = any(s.state == "failed" for s in agent_statuses) if all_done or any_failed: + logging.debug("Workers exiting %s.", "cleanly" if not any_failed else "with errors") break finally: ctx.close() sys.stdout.flush() sys.stderr.flush() + launcher_agent_group.shutdown() + + logging.debug("Agent exiting.") diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index a6edbce5..9437d480 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -45,6 +45,8 @@ FunctionP = ParamSpec("FunctionP") FunctionR = TypeVar("FunctionR") +logger = logging.getLogger(__name__) + @dataclass class Launcher: @@ -107,6 +109,7 @@ def run( # noqa: C901, PLR0912, PLR0915 raise RuntimeError(msg) ### + logger.debug("Resolving environment.") hostnames, workers_per_host = resolve_environment( self.hostnames, self.workers_per_host, ssh_config_file=self.ssh_config_file @@ -160,6 +163,8 @@ def handler_factory() -> list[logging.Handler]: agent_payloads = None try: + logger.debug("Starting logging server.") + # Start logging 
server (recieves LogRecords from agents/workers) logging_server_args = LoggingServerArgs( @@ -178,6 +183,8 @@ def handler_factory() -> list[logging.Handler]: log_process.start() + logger.debug("Launching agents.") + # Start agents on each node for i, hostname in enumerate(hostnames): @@ -190,11 +197,14 @@ def handler_factory() -> list[logging.Handler]: rank=i + 1, env_vars=env_vars, env_file=env_file, + hostname=hostname, ), hostname=hostname, ssh_config_file=ssh_config_file, ) + logger.debug("Initializing launcher-agent group.") + # Initialize launcher-agent process group # ranks = (launcher, agent_{hostnames[0]}, ..., agent[-1]) @@ -205,10 +215,14 @@ def handler_factory() -> list[logging.Handler]: rank=0, ) + logger.debug("Receiving agent details.") + # Sync initial payloads between launcher and agents launcher_payload, agent_payloads = launcher_agent_group.sync_payloads(payload=payload) + logger.debug("Entering agent monitoring loop.") + # Monitor agent statuses (until failed or done) while True: @@ -224,20 +238,27 @@ def handler_factory() -> list[logging.Handler]: raise v if all(s.state == "done" for s in agent_statuses): + logger.debug("All workers exited cleanly.") return_values: list[list[FunctionR]] = [s.return_values for s in agent_statuses] # pyright: ignore [reportAssignmentType] return LaunchResult.from_returns(hostnames, return_values) finally: + logger.debug("Stopping logging server.") + if stop_logging_event is not None: stop_logging_event.set() if log_process is not None: log_process.kill() + logger.debug("Killing launcher-agent group.") + if launcher_agent_group is not None: launcher_agent_group.shutdown() # cleanup: SIGTERM all agents if agent_payloads is not None: for agent_payload, agent_hostname in zip(agent_payloads, hostnames): + logger.debug("Killing PID %s on %s.", agent_payload.process_id, agent_hostname) + execute_command( command=f"kill {agent_payload.process_id}", hostname=agent_hostname, diff --git a/src/torchrunx/utils/comm.py 
b/src/torchrunx/utils/comm.py index da68563f..db2ee1fb 100644 --- a/src/torchrunx/utils/comm.py +++ b/src/torchrunx/utils/comm.py @@ -38,6 +38,19 @@ def get_open_port() -> int: FunctionR = TypeVar("FunctionR") +@dataclass +class AgentCliArgs: + """Briefly holds agent CLI arguments after torchrunx.__main__ is called.""" + + launcher_hostname: str + launcher_port: int + world_size: int + rank: int + logger_hostname: str + logger_port: int + hostname: str + + @dataclass class LauncherAgentGroup(Generic[FunctionR]): """Initializes a GLOO distributed process group between launcher and all agents.""" @@ -108,6 +121,7 @@ def sync_agent_statuses( def shutdown(self) -> None: """Terminate process group.""" + dist.barrier(group=self.group) dist.destroy_process_group(group=self.group) diff --git a/src/torchrunx/utils/environment.py b/src/torchrunx/utils/environment.py index 792d8b02..aad8e97f 100644 --- a/src/torchrunx/utils/environment.py +++ b/src/torchrunx/utils/environment.py @@ -102,7 +102,10 @@ def get_gpus_per_host( return [ int( execute_command( - command, hostname, ssh_config_file=ssh_config_file, return_stdout_stderr=True + command, + hostname, + ssh_config_file=ssh_config_file, + return_stdout_stderr=True, )[0] ) for hostname in hostnames @@ -117,6 +120,7 @@ def build_launch_command( rank: int, env_vars: dict[str, str], env_file: str | os.PathLike | None, + hostname: str, ) -> str: """Generator for command to launch torchrunx on an agent.""" # shlex.quote prevents shell injection here (resolves S602 in execute_command) @@ -134,6 +138,7 @@ def build_launch_command( python = shlex.quote(sys.executable) launcher_hostname = shlex.quote(launcher_hostname) + hostname = shlex.quote(hostname) commands.append( f"{python} -u -m torchrunx " @@ -141,7 +146,8 @@ def build_launch_command( f"--launcher-port {launcher_port} " f"--logger-port {logger_port} " f"--world-size {world_size} " - f"--rank {rank}", + f"--rank {rank} " + f"--hostname {hostname}", ) return " && 
".join(commands) @@ -171,7 +177,11 @@ def execute_command( # S602: subprocess.Popen is called with shell=True (https://docs.python.org/3.9/library/subprocess.html#security-considerations) # Made sure to shlex.quote arguments in build_command to prevent shell injection process = subprocess.Popen( # noqa: S602 - command, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + command, + shell=True, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, ) if return_stdout_stderr: diff --git a/tests/test_func.py b/tests/test_func.py index 1a0fc5cb..5ea61d53 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -9,9 +9,7 @@ def test_launch() -> None: - result = trx.Launcher( - hostnames="slurm", - ).run(simple_matmul) + result = trx.Launcher(hostnames="slurm").run(simple_matmul) result_values = reduce(add, result.results.values()) From 2b76c49d2023ec6e0d2708dabefa4bd2c955c09a Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Thu, 27 Feb 2025 15:23:16 -0500 Subject: [PATCH 136/141] rm AgentCliArgs --- src/torchrunx/__main__.py | 5 +---- src/torchrunx/agent.py | 35 +++++++++++++++++++++++------------ src/torchrunx/utils/comm.py | 14 -------------- 3 files changed, 24 insertions(+), 30 deletions(-) diff --git a/src/torchrunx/__main__.py b/src/torchrunx/__main__.py index 2cf981a7..f95d6328 100644 --- a/src/torchrunx/__main__.py +++ b/src/torchrunx/__main__.py @@ -3,7 +3,6 @@ from argparse import ArgumentParser from .agent import main -from .utils.comm import AgentCliArgs if __name__ == "__main__": parser = ArgumentParser() @@ -15,7 +14,7 @@ parser.add_argument("--hostname", type=str) args = parser.parse_args() - agent_args = AgentCliArgs( + main( launcher_hostname=args.launcher_hostname, launcher_port=args.launcher_port, world_size=args.world_size, @@ -24,5 +23,3 @@ logger_port=args.logger_port, hostname=args.hostname, ) - - main(agent_args) diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index cb7f460a..15b9d249 100644 --- 
a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -14,7 +14,6 @@ import torch.distributed.elastic.multiprocessing as dist_mp from .utils.comm import ( - AgentCliArgs, AgentPayload, AgentStatus, LauncherAgentGroup, @@ -25,7 +24,13 @@ def main( - agent_args: AgentCliArgs, + launcher_hostname: str, + launcher_port: int, + world_size: int, + rank: int, + logger_hostname: str, + logger_port: int, + hostname: str, ) -> None: """Main function for agent processes (started on each node). @@ -34,7 +39,13 @@ def main( with each other (and launcher). All agents terminate if failure occurs in any agent. Arguments: - agent_args: Command line arugments provided to the agent at launch. + launcher_hostname: Hostname of the launcher process. + launcher_port: Port for the process group on the launcher. + world_size: Number of agents + 1 (launcher). + rank: Rank of this agent. + logger_hostname: Hostname of the logging server. + logger_port: Port for the logging server. + hostname: Hostname of this agent. 
""" # Stream logs to logging server @@ -43,10 +54,10 @@ def main( log_records_to_socket( logger=logger, - hostname=agent_args.hostname, + hostname=hostname, local_rank=None, - logger_hostname=agent_args.logger_hostname, - logger_port=agent_args.logger_port, + logger_hostname=logger_hostname, + logger_port=logger_port, ) logging.debug("Agent logging setup.") @@ -56,10 +67,10 @@ def main( logging.debug("Initializing launcher-agent group.") launcher_agent_group = LauncherAgentGroup( - launcher_hostname=agent_args.launcher_hostname, - launcher_port=agent_args.launcher_port, - world_size=agent_args.world_size, - rank=agent_args.rank, + launcher_hostname=launcher_hostname, + launcher_port=launcher_port, + world_size=world_size, + rank=rank, ) agent_rank = launcher_agent_group.rank - 1 @@ -93,8 +104,8 @@ def main( i: ( WorkerArgs( function=launcher_payload.fn, - logger_hostname=agent_args.logger_hostname, - logger_port=agent_args.logger_port, + logger_hostname=logger_hostname, + logger_port=logger_port, main_agent_hostname=main_agent_payload.hostname, main_agent_port=main_agent_payload.port, backend=launcher_payload.backend, diff --git a/src/torchrunx/utils/comm.py b/src/torchrunx/utils/comm.py index db2ee1fb..da68563f 100644 --- a/src/torchrunx/utils/comm.py +++ b/src/torchrunx/utils/comm.py @@ -38,19 +38,6 @@ def get_open_port() -> int: FunctionR = TypeVar("FunctionR") -@dataclass -class AgentCliArgs: - """Briefly holds agent CLI arguments after torchrunx.__main__ is called.""" - - launcher_hostname: str - launcher_port: int - world_size: int - rank: int - logger_hostname: str - logger_port: int - hostname: str - - @dataclass class LauncherAgentGroup(Generic[FunctionR]): """Initializes a GLOO distributed process group between launcher and all agents.""" @@ -121,7 +108,6 @@ def sync_agent_statuses( def shutdown(self) -> None: """Terminate process group.""" - dist.barrier(group=self.group) dist.destroy_process_group(group=self.group) From 
cd260f51f8437c28e1c1d72455a90caba0e98fd8 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Fri, 28 Feb 2025 14:01:56 -0500 Subject: [PATCH 137/141] updates to logging --- src/torchrunx/agent.py | 31 ++++++------- .../integrations/{parsing.py => cli.py} | 0 src/torchrunx/launcher.py | 15 +++---- src/torchrunx/utils/__init__.py | 3 -- src/torchrunx/utils/comm.py | 2 +- src/torchrunx/utils/{logging.py => logs.py} | 43 +++++++++---------- src/torchrunx/worker.py | 22 ++++------ 7 files changed, 51 insertions(+), 65 deletions(-) rename src/torchrunx/integrations/{parsing.py => cli.py} (100%) rename src/torchrunx/utils/{logging.py => logs.py} (89%) diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index 15b9d249..05b81e57 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -19,7 +19,7 @@ LauncherAgentGroup, get_open_port, ) -from .utils.logging import log_records_to_socket, redirect_stdio_to_logger +from .utils.logs import log_records_to_socket, redirect_stdio_to_logger from .worker import WorkerArgs, worker_entrypoint @@ -48,23 +48,19 @@ def main( hostname: Hostname of this agent. 
""" # Stream logs to logging server - - logger = logging.getLogger() - redirect_stdio_to_logger(logger) + logger = logging.getLogger(f"{__package__}.{hostname}") log_records_to_socket( - logger=logger, - hostname=hostname, - local_rank=None, - logger_hostname=logger_hostname, - logger_port=logger_port, + hostname=hostname, local_rank=None, logger_hostname=logger_hostname, logger_port=logger_port ) - logging.debug("Agent logging setup.") + redirect_stdio_to_logger(logger) + + logger.debug("Agent logging setup.") # Set up launcher-agent group - logging.debug("Initializing launcher-agent group.") + logger.debug("Initializing launcher-agent group.") launcher_agent_group = LauncherAgentGroup( launcher_hostname=launcher_hostname, @@ -77,7 +73,7 @@ def main( # Communicate initial payloads between launcher/agents - logging.debug("Sending agent details to launcher.") + logger.debug("Sending agent details to launcher.") payload = AgentPayload( hostname=socket.getfqdn(), @@ -86,7 +82,6 @@ def main( ) launcher_payload, agent_payloads = launcher_agent_group.sync_payloads(payload=payload) - main_agent_payload = agent_payloads[0] hostname = launcher_payload.hostnames[agent_rank] worker_world_size = launcher_payload.worker_world_size @@ -95,7 +90,7 @@ def main( # Spawn worker processes - logging.debug("Launching worker processes.") + logger.debug("Launching worker processes.") ctx = dist_mp.start_processes( name=f"{hostname}_", @@ -106,8 +101,8 @@ def main( function=launcher_payload.fn, logger_hostname=logger_hostname, logger_port=logger_port, - main_agent_hostname=main_agent_payload.hostname, - main_agent_port=main_agent_payload.port, + master_hostname=agent_payloads[0].hostname, + master_port=agent_payloads[0].port, backend=launcher_payload.backend, rank=worker_global_ranks[i], local_rank=i, @@ -146,7 +141,7 @@ def main( all_done = all(s.state == "done" for s in agent_statuses) any_failed = any(s.state == "failed" for s in agent_statuses) if all_done or any_failed: - 
logging.debug("Workers exiting %s.", "cleanly" if not any_failed else "with errors") + logger.debug("Workers exiting %s.", "cleanly" if not any_failed else "with errors") break finally: ctx.close() @@ -154,4 +149,4 @@ def main( sys.stderr.flush() launcher_agent_group.shutdown() - logging.debug("Agent exiting.") + logger.debug("Agent exiting.") diff --git a/src/torchrunx/integrations/parsing.py b/src/torchrunx/integrations/cli.py similarity index 100% rename from src/torchrunx/integrations/parsing.py rename to src/torchrunx/integrations/cli.py diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 9437d480..1196b708 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -29,7 +29,7 @@ resolve_environment, ) from .utils.errors import ExceptionFromWorker, WorkerFailedError -from .utils.logging import LoggingServerArgs, default_handlers, start_logging_server +from .utils.logs import LoggingServerArgs, default_handlers, start_logging_server DEFAULT_ENV_VARS_FOR_COPY = ( "PATH", @@ -45,8 +45,6 @@ FunctionP = ParamSpec("FunctionP") FunctionR = TypeVar("FunctionR") -logger = logging.getLogger(__name__) - @dataclass class Launcher: @@ -104,6 +102,8 @@ def run( # noqa: C901, PLR0912, PLR0915 WorkerFailedError: If a worker fails (e.g. from a segmentation fault). AgentFailedError: If an agent fails, e.g. from an OS signal. """ + logger = logging.getLogger(__package__) + if not dist.is_available(): msg = "The torch.distributed package is not available." 
raise RuntimeError(msg) @@ -249,11 +249,6 @@ def handler_factory() -> list[logging.Handler]: if log_process is not None: log_process.kill() - logger.debug("Killing launcher-agent group.") - - if launcher_agent_group is not None: - launcher_agent_group.shutdown() - # cleanup: SIGTERM all agents if agent_payloads is not None: for agent_payload, agent_hostname in zip(agent_payloads, hostnames): @@ -265,6 +260,10 @@ def handler_factory() -> list[logging.Handler]: ssh_config_file=ssh_config_file, ) + if launcher_agent_group is not None: + logger.debug("Killing launcher-agent group.") + launcher_agent_group.shutdown() + @dataclass class LaunchResult(Generic[FunctionR]): diff --git a/src/torchrunx/utils/__init__.py b/src/torchrunx/utils/__init__.py index d6b94d17..e69de29b 100644 --- a/src/torchrunx/utils/__init__.py +++ b/src/torchrunx/utils/__init__.py @@ -1,3 +0,0 @@ -from .logging import add_filter_to_handler, file_handler, stream_handler - -__all__ = ["add_filter_to_handler", "file_handler", "stream_handler"] diff --git a/src/torchrunx/utils/comm.py b/src/torchrunx/utils/comm.py index da68563f..0edbb681 100644 --- a/src/torchrunx/utils/comm.py +++ b/src/torchrunx/utils/comm.py @@ -119,7 +119,7 @@ class LauncherPayload: hostnames: list[str] worker_global_ranks: list[list[int]] worker_world_size: int - backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None + backend: Literal["nccl", "gloo", "mpi", "ucc"] | None timeout: int diff --git a/src/torchrunx/utils/logging.py b/src/torchrunx/utils/logs.py similarity index 89% rename from src/torchrunx/utils/logging.py rename to src/torchrunx/utils/logs.py index 71c5ea49..0558131d 100644 --- a/src/torchrunx/utils/logging.py +++ b/src/torchrunx/utils/logs.py @@ -1,4 +1,4 @@ -"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" # noqa: A005 +"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" from __future__ import annotations @@ -24,7 +24,7 
@@ from contextlib import redirect_stderr, redirect_stdout from dataclasses import dataclass from io import StringIO -from logging import Handler, Logger +from logging import Handler, Logger, LogRecord from logging.handlers import SocketHandler from multiprocessing.synchronize import Event as EventClass from pathlib import Path @@ -55,11 +55,7 @@ def _filter(record: WorkerLogRecord) -> bool: handler.addFilter(_filter) # pyright: ignore [reportArgumentType] -def default_handlers( - hostnames: list[str], - workers_per_host: list[int], - log_level: int = logging.INFO, -) -> list[logging.Handler]: +def default_handlers(hostnames: list[str], workers_per_host: list[int]) -> list[logging.Handler]: """Constructs default :obj:`logging.Handler` objects. Logs for the rank 0 agent and worker are written to launcher process stdout. @@ -67,28 +63,32 @@ def default_handlers( hostname, local_rank). """ log_dir = Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")) - log_level = logging._nameToLevel[os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO")] # noqa: SLF001 + file_log_level = logging._nameToLevel[os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO")] # noqa: SLF001 + return [ - stream_handler(hostname=hostnames[0], local_rank=None, log_level=log_level), - stream_handler(hostname=hostnames[0], local_rank=0, log_level=log_level), - *file_handlers(hostnames, workers_per_host, log_dir=log_dir, log_level=log_level), + RedirectHandler(hostname=hostnames[0], local_rank=None), + RedirectHandler(hostname=hostnames[0], local_rank=0), + *file_handlers(hostnames, workers_per_host, log_dir=log_dir, log_level=file_log_level), ] +class RedirectHandler(logging.Handler): + def __init__(self, hostname: str, local_rank: int | None) -> None: + super().__init__() + add_filter_to_handler(self, hostname=hostname, local_rank=local_rank) + + def emit(self, record: LogRecord) -> None: + logger = logging.getLogger(record.name) + if logger.isEnabledFor(record.levelno): + logger.handle(record) + + def 
stream_handler( hostname: str, local_rank: int | None, log_level: int = logging.NOTSET ) -> logging.Handler: """Handler builder function for writing logs from specified hostname/rank to stdout.""" handler = logging.StreamHandler(stream=sys.stdout) add_filter_to_handler(handler, hostname, local_rank, log_level=log_level) - handler.setFormatter( - logging.Formatter( - "%(asctime)s:%(levelname)s:%(hostname)s[%(local_rank)s]: %(message)s" - if local_rank is not None - else "%(asctime)s:%(levelname)s:%(hostname)s: %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - ), - ) return handler @@ -259,14 +259,13 @@ def from_record(cls, record: logging.LogRecord, hostname: str, local_rank: int | def log_records_to_socket( - logger: Logger, hostname: str, local_rank: int | None, # None indicates agent logger_hostname: str, logger_port: int, ) -> None: """Encode LogRecords with hostname/local_rank. Send to TCP socket on Launcher.""" - logger.setLevel(logging.NOTSET) + logging.root.setLevel(logging.NOTSET) old_factory = logging.getLogRecordFactory() @@ -276,4 +275,4 @@ def record_factory(*args, **kwargs) -> WorkerLogRecord: # noqa: ANN002, ANN003 logging.setLogRecordFactory(record_factory) - logger.addHandler(SocketHandler(host=logger_hostname, port=logger_port)) + logging.root.addHandler(SocketHandler(host=logger_hostname, port=logger_port)) diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index 422f9cc4..0da0c7e9 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -11,12 +11,11 @@ from typing import Any, Callable, Literal import cloudpickle -import torch import torch.distributed as dist from typing_extensions import Self from .utils.errors import ExceptionFromWorker -from .utils.logging import log_records_to_socket, redirect_stdio_to_logger +from .utils.logs import log_records_to_socket, redirect_stdio_to_logger __all__ = ["WorkerArgs", "worker_entrypoint"] @@ -28,9 +27,9 @@ class WorkerArgs: function: Callable logger_hostname: str logger_port: int - 
main_agent_hostname: str - main_agent_port: int - backend: Literal["nccl", "gloo", "mpi", "ucc", "auto"] | None + master_hostname: str + master_port: int + backend: Literal["nccl", "gloo", "mpi", "ucc"] | None rank: int local_rank: int node_rank: int @@ -60,10 +59,9 @@ def worker_entrypoint(serialized_worker_args: bytes) -> Any | ExceptionFromWorke # Start logging to the logging server (i.e. the launcher) - logger = logging.getLogger() + logger = logging.getLogger(f"{__package__}.{worker_args.hostname}.{worker_args.local_rank}") log_records_to_socket( - logger=logger, hostname=worker_args.hostname, local_rank=worker_args.local_rank, logger_hostname=worker_args.logger_hostname, @@ -79,23 +77,21 @@ def worker_entrypoint(serialized_worker_args: bytes) -> Any | ExceptionFromWorke os.environ["GROUP_RANK"] = str(worker_args.node_rank) os.environ["LOCAL_WORLD_SIZE"] = str(worker_args.local_world_size) os.environ["WORLD_SIZE"] = str(worker_args.world_size) - os.environ["MASTER_ADDR"] = worker_args.main_agent_hostname - os.environ["MASTER_PORT"] = str(worker_args.main_agent_port) + os.environ["MASTER_ADDR"] = worker_args.master_hostname + os.environ["MASTER_PORT"] = str(worker_args.master_port) # Prepare the process group (e.g. 
for communication within the user's function) if worker_args.backend is not None: backend = worker_args.backend - if backend == "auto": - backend = "nccl" if torch.cuda.is_available() else "gloo" dist.init_process_group( backend=backend, world_size=worker_args.world_size, rank=worker_args.rank, store=dist.TCPStore( # pyright: ignore [reportPrivateImportUsage] - host_name=worker_args.main_agent_hostname, - port=worker_args.main_agent_port, + host_name=worker_args.master_hostname, + port=worker_args.master_port, world_size=worker_args.world_size, is_master=(worker_args.rank == 0), ), From 41d97597fc88b4789c9a46bb41ede867692f71d2 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Sun, 2 Mar 2025 01:49:48 -0500 Subject: [PATCH 138/141] small adjustments to logging messages --- pyproject.toml | 1 + src/torchrunx/agent.py | 21 ++++++++------------- src/torchrunx/launcher.py | 30 +++++++++++++++--------------- 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7925c7b7..dfffaf32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ ignore = [ "S607", # bandit: subprocess "COM812", "ISC001", # conflict with formatter + "G004" # f-string in logging ] [tool.ruff.lint.per-file-ignores] "tests/**/*.py" = [ diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index 05b81e57..6209e456 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -47,7 +47,8 @@ def main( logger_port: Port for the logging server. hostname: Hostname of this agent. 
""" - # Stream logs to logging server + # Setup logging & stream logs to server + logger = logging.getLogger(f"{__package__}.{hostname}") log_records_to_socket( @@ -56,10 +57,6 @@ def main( redirect_stdio_to_logger(logger) - logger.debug("Agent logging setup.") - - # Set up launcher-agent group - logger.debug("Initializing launcher-agent group.") launcher_agent_group = LauncherAgentGroup( @@ -71,9 +68,7 @@ def main( agent_rank = launcher_agent_group.rank - 1 - # Communicate initial payloads between launcher/agents - - logger.debug("Sending agent details to launcher.") + logger.debug("Synchronizing launcher and agents.") payload = AgentPayload( hostname=socket.getfqdn(), @@ -88,9 +83,7 @@ def main( worker_global_ranks = launcher_payload.worker_global_ranks[agent_rank] num_workers = len(worker_global_ranks) - # Spawn worker processes - - logger.debug("Launching worker processes.") + logger.info(f"Starting {num_workers} worker processes.") ctx = dist_mp.start_processes( name=f"{hostname}_", @@ -128,6 +121,8 @@ def main( # Monitor and communicate agent statuses # Terminate gracefully upon failure + logger.debug("Entering worker monitoring and agent communication loop.") + try: status = None while True: @@ -141,7 +136,7 @@ def main( all_done = all(s.state == "done" for s in agent_statuses) any_failed = any(s.state == "failed" for s in agent_statuses) if all_done or any_failed: - logger.debug("Workers exiting %s.", "cleanly" if not any_failed else "with errors") + logger.info(f"Workers exited {'with' if any_failed else 'without'} errors.") break finally: ctx.close() @@ -149,4 +144,4 @@ def main( sys.stderr.flush() launcher_agent_group.shutdown() - logger.debug("Agent exiting.") + logger.debug("Terminating agent process.") diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 1196b708..2df0cf10 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -108,8 +108,9 @@ def run( # noqa: C901, PLR0912, PLR0915 msg = "The torch.distributed 
package is not available." raise RuntimeError(msg) + logger.debug("Preparing launch environment.") + ### - logger.debug("Resolving environment.") hostnames, workers_per_host = resolve_environment( self.hostnames, self.workers_per_host, ssh_config_file=self.ssh_config_file @@ -183,11 +184,11 @@ def handler_factory() -> list[logging.Handler]: log_process.start() - logger.debug("Launching agents.") - # Start agents on each node for i, hostname in enumerate(hostnames): + logger.info(f'Launching "{func.__name__}" on {hostname}.') + execute_command( command=build_launch_command( launcher_hostname=launcher_hostname, @@ -215,16 +216,15 @@ def handler_factory() -> list[logging.Handler]: rank=0, ) - logger.debug("Receiving agent details.") - # Sync initial payloads between launcher and agents + logger.debug("Synchronizing launcher and agents.") launcher_payload, agent_payloads = launcher_agent_group.sync_payloads(payload=payload) - logger.debug("Entering agent monitoring loop.") - # Monitor agent statuses (until failed or done) + logger.debug("Entering agent monitoring loop.") + while True: # could raise AgentFailedError agent_statuses = launcher_agent_group.sync_agent_statuses(status=None) @@ -238,17 +238,10 @@ def handler_factory() -> list[logging.Handler]: raise v if all(s.state == "done" for s in agent_statuses): - logger.debug("All workers exited cleanly.") + logger.info("All workers completed successfully.") return_values: list[list[FunctionR]] = [s.return_values for s in agent_statuses] # pyright: ignore [reportAssignmentType] return LaunchResult.from_returns(hostnames, return_values) finally: - logger.debug("Stopping logging server.") - - if stop_logging_event is not None: - stop_logging_event.set() - if log_process is not None: - log_process.kill() - # cleanup: SIGTERM all agents if agent_payloads is not None: for agent_payload, agent_hostname in zip(agent_payloads, hostnames): @@ -264,6 +257,13 @@ def handler_factory() -> list[logging.Handler]: 
logger.debug("Killing launcher-agent group.") launcher_agent_group.shutdown() + logger.debug("Stopping logging server.") + + if stop_logging_event is not None: + stop_logging_event.set() + if log_process is not None: + log_process.kill() + @dataclass class LaunchResult(Generic[FunctionR]): From d4123c85b9ccfcb826b0d98edd2377703bf2b8b8 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 3 Mar 2025 15:11:48 -0500 Subject: [PATCH 139/141] accept int in log level env var --- src/torchrunx/utils/logs.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/torchrunx/utils/logs.py b/src/torchrunx/utils/logs.py index 0558131d..7a52d379 100644 --- a/src/torchrunx/utils/logs.py +++ b/src/torchrunx/utils/logs.py @@ -63,7 +63,18 @@ def default_handlers(hostnames: list[str], workers_per_host: list[int]) -> list[ hostname, local_rank). """ log_dir = Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")) - file_log_level = logging._nameToLevel[os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO")] # noqa: SLF001 + + file_log_level = os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO") + if file_log_level.isdigit(): + file_log_level = int(file_log_level) + elif file_log_level in logging._nameToLevel: # noqa: SLF001 + file_log_level = logging._nameToLevel[file_log_level] # noqa: SLF001 + else: + msg = ( + f"Invalid value for $TORCHRUNX_LOG_LEVEL: {file_log_level}. " + f"Should be a positive integer or any of: {', '.join(logging._nameToLevel.keys())}." 
# noqa: SLF001 + ) + raise ValueError(msg) return [ RedirectHandler(hostname=hostnames[0], local_rank=None), From ff0b90ee45ab363bafd31cbe1ed8cce589d89269 Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 10 Mar 2025 01:25:57 -0400 Subject: [PATCH 140/141] updates to logging hierarchy --- src/torchrunx/agent.py | 3 +-- src/torchrunx/utils/logs.py | 14 +++++++++++++- src/torchrunx/worker.py | 3 +-- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index 6209e456..2225e5bd 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -49,12 +49,11 @@ def main( """ # Setup logging & stream logs to server - logger = logging.getLogger(f"{__package__}.{hostname}") - log_records_to_socket( hostname=hostname, local_rank=None, logger_hostname=logger_hostname, logger_port=logger_port ) + logger = logging.getLogger() redirect_stdio_to_logger(logger) logger.debug("Initializing launcher-agent group.") diff --git a/src/torchrunx/utils/logs.py b/src/torchrunx/utils/logs.py index 7a52d379..b09098b6 100644 --- a/src/torchrunx/utils/logs.py +++ b/src/torchrunx/utils/logs.py @@ -169,7 +169,19 @@ def handle(self) -> None: while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) obj = pickle.loads(chunk) - record = logging.makeLogRecord(obj) + + ## Transform log record + + record: WorkerLogRecord = logging.makeLogRecord(obj) # pyright: ignore [reportAssignmentType] + + if record.name != "root": + record.msg = f"{record.name}:{record.msg}" + + record.name = f"torchrunx.{record.hostname}" + if record.local_rank is not None: + record.name += f".{record.local_rank}" + + ## Handle log record for handler in handlers: handler.handle(record) diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index 0da0c7e9..a647a095 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -59,8 +59,6 @@ def worker_entrypoint(serialized_worker_args: bytes) -> Any | ExceptionFromWorke # Start 
logging to the logging server (i.e. the launcher) - logger = logging.getLogger(f"{__package__}.{worker_args.hostname}.{worker_args.local_rank}") - log_records_to_socket( hostname=worker_args.hostname, local_rank=worker_args.local_rank, @@ -68,6 +66,7 @@ def worker_entrypoint(serialized_worker_args: bytes) -> Any | ExceptionFromWorke logger_port=worker_args.logger_port, ) + logger = logging.getLogger() redirect_stdio_to_logger(logger) # Set rank/world environment variables From 1035b759a3d765d6234dd011dcbee2bbb29d245f Mon Sep 17 00:00:00 2001 From: apoorvkh Date: Mon, 10 Mar 2025 15:24:30 -0400 Subject: [PATCH 141/141] final updates to docs --- CONTRIBUTING.md | 4 +- README.md | 31 +++-- docs/source/examples/deepspeed.md | 2 +- docs/source/how_it_works.md | 4 + docs/source/usage/general.md | 8 +- docs/source/usage/logging.md | 61 ++++++--- docs/source/usage/slurm.md | 10 +- src/torchrunx/agent.py | 2 +- src/torchrunx/launcher.py | 6 +- src/torchrunx/utils/log_handling.py | 108 ++++++++++++++++ .../utils/{logs.py => log_streaming.py} | 119 +----------------- src/torchrunx/worker.py | 2 +- 12 files changed, 186 insertions(+), 171 deletions(-) create mode 100644 src/torchrunx/utils/log_handling.py rename src/torchrunx/utils/{logs.py => log_streaming.py} (58%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2769890b..e9962865 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,7 +6,7 @@ We use `ruff check` for linting, `ruff format` for formatting, `pyright` for sta ## Pull Requests -Make a pull request with your changes on Github and we'll try to look at it soon! If addressing a specific issue, mention it in the PR, and offer a short explanation of your fix. If adding a new feature, explain why it's meaningful and belongs in __torchrunx__. +Make a pull request with your changes on Github and we'll try to look at it soon! If addressing a specific issue, mention it in the PR, and offer a short explanation of your fix. 
If adding a new feature, explain why it's meaningful and belongs in **torchrunx**. ## Testing @@ -16,4 +16,4 @@ At the moment, we run `pytest tests/test_ci.py` (i.e. simple single-node CPU-onl ## Documentation -Our documentation is hosted on Github Pages and is updated with every package release. We build our documentation with `sphinx` using the command: `uv run --group docs python -m sphinx --builder html --doctree-dir docs/_build/.doctrees --conf-dir docs --show-traceback docs/source docs/_build/html`. The documentation will then be generated at `docs/_build/html`. +Our documentation is hosted on Github Pages and is updated with every package release. We build our documentation with [Sphinx](https://www.sphinx-doc.org): `source scripts/build_docs.sh`. The documentation will then be generated at `docs/_build/html` (and can be rendered with `python -m http.server --directory docs/_build/html`). diff --git a/README.md b/README.md index 22b42bab..d86e0e5d 100644 --- a/README.md +++ b/README.md @@ -21,20 +21,16 @@ It enables complex workflows within a single script and has useful features even pip install torchrunx ``` -Requires: -- Linux -- If using multiple machines: SSH & shared filesystem +Requires: Linux. If using multiple machines: SSH & shared filesystem. --- -**Dummy example: parallelizing training with `torchrunx`** +

Example: simple training loop

+ +Suppose we have some distributed training function (which needs to run on every GPU): ```python -def distributed_training(model: nn.Module, num_steps: int) -> nn.Module: - # Environment variables: RANK, LOCAL_RANK, ... - # ddp_model = DistributedDataParallel(model, device_ids=[local_rank]) - ... - retun trained_model +def distributed_training(model: nn.Module, num_steps: int) -> nn.Module: ... ```
@@ -70,14 +66,14 @@ def distributed_training(model: nn.Module, num_steps: int = 10) -> nn.Module | N
+We can distribute and run this function (e.g. on 2 machines x 2 GPUs) using **`torchrunx`**! + ```python import torchrunx -# Launch training on 2 machines x 2 GPUs - launcher = torchrunx.Launcher( - hostnames = ["localhost", "second_machine"], - workers_per_host = 2 + hostnames = ["localhost", "second_machine"], # or IP addresses + workers_per_host = 2 # e.g. number of GPUs per host ) results = launcher.run( @@ -87,16 +83,17 @@ results = launcher.run( ) ``` +Once completed, you can retrieve the results and process them as you wish. + ```python -# get the results trained_model: nn.Module = results.rank(0) -# or: results.index(hostname="localhost", local_rank=0) + # or: results.index(hostname="localhost", local_rank=0) -# and continue your script — e.g. save model to checkpoint +# and continue your script torch.save(trained_model.state_dict(), "output/model.pth") ``` -**See examples where we fine-tune LLMs using:** +**See more examples where we fine-tune LLMs using:** - [Transformers](https://torchrun.xyz/examples/transformers.html) - [DeepSpeed](https://torchrun.xyz/examples/deepspeed.html) - [PyTorch Lightning](https://torchrun.xyz/examples/lightning.html) diff --git a/docs/source/examples/deepspeed.md b/docs/source/examples/deepspeed.md index c0b3bc64..e4cea9b4 100644 --- a/docs/source/examples/deepspeed.md +++ b/docs/source/examples/deepspeed.md @@ -14,7 +14,7 @@ Here's an example script that uses `torchrunx` with [DeepSpeed](https://www.deep ## Training GPT-2 on WikiText -Deepspeed requires additional (non-Python) dependencies. Use the following commands to set up a project. Source: [Apoorv's Blog — Managing Project Dependencies](https://blog.apoorvkh.com/posts/project-dependencies.html) +Deepspeed requires additional (non-Python) dependencies. Use the following commands to set up a project. 
[source: [Apoorv's Blog — Managing Project Dependencies](https://blog.apoorvkh.com/posts/project-dependencies.html)] Pre-requisite: [pixi](https://pixi.sh) diff --git a/docs/source/how_it_works.md b/docs/source/how_it_works.md index 6bbbceea..8e41eb39 100644 --- a/docs/source/how_it_works.md +++ b/docs/source/how_it_works.md @@ -4,12 +4,16 @@ Suppose you want to run a script (`train.py`) on `N` machines (or "nodes") with You'll need to start a new process for each GPU. Each process will execute your script in parallel and select its GPU based on the process rank. Your script will also form a [distributed group](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) so the processes may communicate with each other (e.g. passing tensors). +## `torchrun` + Normally, you'd do this by running the `torchrun --node-rank {i} ... train.py ...` command on every machine. In short, you'll end up with a topology like: ![torchrun diagram](./artifacts/torchrun.png) As a side effect of this structure, every process will run until (1) script completion or (2) another process stops communicating (e.g. if killed by the system for abnormal reasons). The status of other processes is not actively communicated: so if some process is indeed killed, it would take 10 minutes (by default) for the remaining processes to time-out. Also, since this approach parallelizes the entire script, we can't catch and handle these system-level issues as exceptions. +## `torchrunx` 🔥 + `torchrunx` offers a functional interface, with a launcher–worker topology, instead. ![torchrunx diagram](./artifacts/torchrunx.png) diff --git a/docs/source/usage/general.md b/docs/source/usage/general.md index 9626b7ca..fbe992be 100644 --- a/docs/source/usage/general.md +++ b/docs/source/usage/general.md @@ -34,7 +34,7 @@ You can catch these errors and handle them as you wish! ```python for config in configs: # e.g. 
hyper-parameter sweep try: - Launcher().run(train, config) + torchrunx.Launcher().run(train, config) except torch.cuda.OutOfMemoryError: print(f"{config} results in OOM... continuing...") ``` @@ -44,12 +44,12 @@ If you are expecting intermittent failures, you can catch errors and invoke retr ```python for retry in range(3): try: - Launcher().run(train, resume_from_checkpoint=True) + torchrunx.Launcher().run(train, resume_from_checkpoint=True) except torchrunx.WorkerFailedError as e: print(f"Error occurred: {e}") print(f"Retrying ({retry}) ...") - else: - break + else: # if run() is successful + break ``` ## Environment variables diff --git a/docs/source/usage/logging.md b/docs/source/usage/logging.md index 14be733b..0e764938 100644 --- a/docs/source/usage/logging.md +++ b/docs/source/usage/logging.md @@ -1,37 +1,62 @@ # Custom Logging -We forward all worker and agent logs (i.e. from {mod}`logging`, {obj}`sys.stdout`, and {obj}`sys.stderr`) to the launcher for processing. +We forward all agent and worker logs (i.e. from {mod}`logging`, {obj}`sys.stdout`, and {obj}`sys.stderr`) to the launcher process. -By default, the logs from the rank 0 agent and worker are printed into the launcher's `stdout` stream. Logs from all agents and workers are written to a directory (by the current timestamp) in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`). +## Defaults -You can fully customize how logs are processed using {func}`torchrunx.Launcher.set_logging_handlers`. You should provide it a function that constructs and returns a list of {obj}`logging.Handler` objects. Each {obj}`logging.Handler` controls where logs should be written. +By default, the logs from the rank 0 agent and rank 0 worker are handled by loggers on the launcher process (and so they should be printed to `stdout`/`stderr`). You may control these logs like: -We provide some handler utilities that direct a specified worker or agent's logs to a file or stream. - -```{eval-rst} -.. 
autofunction:: torchrunx.utils.file_handler +```python +logging.basicConfig(level=logging.INFO) +logging.getLogger("torchrunx").setLevel(logging.DEBUG) +logging.getLogger("torchrunx.node1").setLevel(logging.INFO) +logging.getLogger("torchrunx.node1.1").setLevel(logging.INFO) # worker 1 (local rank) on node 1 ``` -```{eval-rst} -.. autofunction:: torchrunx.utils.stream_handler -``` +Also, logs from all agents and workers are written to a directory (by the current timestamp) in `$TORCHRUNX_LOG_DIR` (default: `./torchrunx_logs`). These can be controlled using `$TORCHRUNX_LOG_LEVEL` (default: `INFO`). -For example, we could construct and pass a handler factory that streams the rank 0 agent and worker logs to the launcher's `stdout`. +## Customization + +You can fully customize how logs are processed using {func}`torchrunx.Launcher.set_logging_handlers`. You should provide it a factory function that constructs and returns a list of {obj}`logging.Handler` objects. Each {obj}`logging.Handler` controls where logs should be written. You can also add a filter to restrict the handler to the logs of a specific agent or worker. 
+ +Here's an example: ```python -def rank_0_handlers() -> list[logging.Handler]: +from torchrunx.utils.log_handling import RedirectHandler, get_handler_filter + +def custom_handlers() -> list[logging.Handler]: + + # Handler: redirect logs from (host 0, agent) to logger on launcher process + redirect_handler = RedirectHandler() + redirect_handler.addFilter(get_handler_filter( + hostname=hostnames[0], local_rank=None, log_level=logging.DEBUG + )) + + # Handler: output logs from (host 0, worker 0) to "output.txt" + file_handler = logging.FileHandler("output.txt") + file_handler.addFilter(get_handler_filter( + hostname=hostnames[0], local_rank=0, log_level=logging.DEBUG + )) + return [ - stream_handler(hostname=hostnames[0], local_rank=None), # agent 0 - stream_handler(hostname=hostnames[0], local_rank=0), # worker 0 + redirect_handler, + file_handler, ] ``` ```python -torchrunx.Launcher(...).set_logging_handlers(rank_0_handlers).run(...) +torchrunx.Launcher(...).set_logging_handlers(custom_handlers).run(...) ``` -You can also [provide your own ``logging.Handler``](https://docs.python.org/3.9/library/logging.handlers.html#module-logging.handlers) and apply {func}`torchrunx.utils.add_filter_to_handler` to constrain which worker or agent's logs it should process. +Finally, you can control library-specific logging (within the worker processes) by modifying the distributed function: + +```python +def distributed_function(): + logging.getLogger("transformers").setLevel(logging.DEBUG) + + logger = logging.getLogger("my_app") + logger.info("Hello world!") + ... -```{eval-rst} -.. 
autofunction:: torchrunx.utils.add_filter_to_handler +torchrunx.Launcher(...).run(distributed_function) ``` diff --git a/docs/source/usage/slurm.md b/docs/source/usage/slurm.md index 3483f4f2..e34a1a1c 100644 --- a/docs/source/usage/slurm.md +++ b/docs/source/usage/slurm.md @@ -14,9 +14,8 @@ def distributed_training(): if __name__ == "__main__": torchrunx.Launcher( - # optionally specify: - # hostnames = "slurm", - # workers_per_host = "gpu" + hostnames = "slurm", + workers_per_host = "gpu" ).run(distributed_training) ``` @@ -46,9 +45,8 @@ def distributed_training(): def launch_training(): torchrunx.Launcher( - # optionally specify: - # hostnames = "slurm", - # workers_per_host = "gpu" + hostnames = "slurm", + workers_per_host = "gpu" ).run(distributed_training) if __name__ == "__main__": diff --git a/src/torchrunx/agent.py b/src/torchrunx/agent.py index 2225e5bd..4e518e6c 100644 --- a/src/torchrunx/agent.py +++ b/src/torchrunx/agent.py @@ -19,7 +19,7 @@ LauncherAgentGroup, get_open_port, ) -from .utils.logs import log_records_to_socket, redirect_stdio_to_logger +from .utils.log_streaming import log_records_to_socket, redirect_stdio_to_logger from .worker import WorkerArgs, worker_entrypoint diff --git a/src/torchrunx/launcher.py b/src/torchrunx/launcher.py index 2df0cf10..f228076b 100644 --- a/src/torchrunx/launcher.py +++ b/src/torchrunx/launcher.py @@ -29,7 +29,8 @@ resolve_environment, ) from .utils.errors import ExceptionFromWorker, WorkerFailedError -from .utils.logs import LoggingServerArgs, default_handlers, start_logging_server +from .utils.log_handling import default_handlers +from .utils.log_streaming import LoggingServerArgs, start_logging_server DEFAULT_ENV_VARS_FOR_COPY = ( "PATH", @@ -80,10 +81,9 @@ def set_logging_handlers( ) -> Self: """Provide a ``handler_factory`` function to customize processing of agent/worker logs. - See `Custom Logging `_. - Parameters: handler_factory: Function that constructs and returns :obj:`logging.Handler` objects. 
+ See `Custom Logging `_ for more details. """ self.handler_factory = handler_factory return self diff --git a/src/torchrunx/utils/log_handling.py b/src/torchrunx/utils/log_handling.py new file mode 100644 index 00000000..34e919f1 --- /dev/null +++ b/src/torchrunx/utils/log_handling.py @@ -0,0 +1,108 @@ +"""Utilities for intercepting logs in worker processes and handling these in the Launcher.""" + +from __future__ import annotations + +__all__ = [ + "RedirectHandler", + "default_handlers", + "file_handlers", + "get_handler_filter", +] + +import datetime +import logging +import os +from logging import LogRecord +from pathlib import Path +from typing import Callable + + +def get_handler_filter( + hostname: str, + local_rank: int | None, # None indicates agent + log_level: int = logging.NOTSET, +) -> Callable[[LogRecord], bool]: + """Get an agent- or worker- specific filter to apply to :obj:`logging.Handler`.""" + return lambda record: ( + record.hostname == hostname # pyright: ignore [reportAttributeAccessIssue] + and record.local_rank == local_rank # pyright: ignore [reportAttributeAccessIssue] + and record.levelno >= log_level + ) + + +class RedirectHandler(logging.Handler): + """For handling logs from hostname/rank with a corresponding logger in the launcher process.""" + + def emit(self, record: LogRecord) -> None: + """Handle log record using corresponding logger.""" + logger = logging.getLogger(record.name) + if logger.isEnabledFor(record.levelno): + logger.handle(record) + + +def file_handlers( + hostnames: list[str], + workers_per_host: list[int], + log_dir: str | os.PathLike = Path("torchrunx_logs"), + log_level: int = logging.NOTSET, +) -> list[logging.Handler]: + """Handler builder function for writing logs for all workers/agents to a directory. + + Files are named with hostname and the local_rank (for workers). 
+ """ + handlers = [] + + timestamp = datetime.datetime.now().isoformat(timespec="seconds") + log_dir = Path(log_dir) / timestamp + log_dir.mkdir(parents=True, exist_ok=True) + + formatter = logging.Formatter( + "%(asctime)s:%(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S" + ) + + for hostname, num_workers in zip(hostnames, workers_per_host): + for local_rank in [None, *range(num_workers)]: + local_rank_str = f"[{local_rank}]" if local_rank is not None else "" + file_path = log_dir / f"{hostname}{local_rank_str}.log" + + h = logging.FileHandler(file_path) + h.addFilter(get_handler_filter(hostname, local_rank, log_level=log_level)) + h.setFormatter(formatter) + + handlers.append(h) + + return handlers + + +def default_handlers(hostnames: list[str], workers_per_host: list[int]) -> list[logging.Handler]: + """Constructs default :obj:`logging.Handler` objects. + + Logs for the rank 0 agent and rank 0 worker are redirected to loggers in the launcher process. + Logs for all hosts/workers are written to files in ``$TORCHRUNX_LOG_DIR`` (named by timestamp, + hostname, local_rank). + """ + log_dir = Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")) + + file_log_level = os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO") + if file_log_level.isdigit(): + file_log_level = int(file_log_level) + elif file_log_level in logging._nameToLevel: # noqa: SLF001 + file_log_level = logging._nameToLevel[file_log_level] # noqa: SLF001 + else: + msg = ( + f"Invalid value for $TORCHRUNX_LOG_LEVEL: {file_log_level}. " + f"Should be a positive integer or any of: {', '.join(logging._nameToLevel.keys())}." 
# noqa: SLF001 + ) + raise ValueError(msg) + + redirect_agent_0_handler = RedirectHandler() + redirect_agent_0_handler.addFilter(get_handler_filter(hostnames[0], None)) + + redirect_worker_0_handler = RedirectHandler() + redirect_worker_0_handler.addFilter(get_handler_filter(hostnames[0], 0)) + + return [ + redirect_agent_0_handler, + redirect_worker_0_handler, + *file_handlers(hostnames, workers_per_host, log_dir=log_dir, log_level=file_log_level), + ] diff --git a/src/torchrunx/utils/logs.py b/src/torchrunx/utils/log_streaming.py similarity index 58% rename from src/torchrunx/utils/logs.py rename to src/torchrunx/utils/log_streaming.py index b09098b6..af5ff520 100644 --- a/src/torchrunx/utils/logs.py +++ b/src/torchrunx/utils/log_streaming.py @@ -4,19 +4,12 @@ __all__ = [ "LoggingServerArgs", - "add_filter_to_handler", - "default_handlers", - "file_handler", - "file_handlers", "log_records_to_socket", "redirect_stdio_to_logger", "start_logging_server", - "stream_handler", ] -import datetime import logging -import os import pickle import signal import struct @@ -24,125 +17,15 @@ from contextlib import redirect_stderr, redirect_stdout from dataclasses import dataclass from io import StringIO -from logging import Handler, Logger, LogRecord +from logging import Handler, Logger from logging.handlers import SocketHandler from multiprocessing.synchronize import Event as EventClass -from pathlib import Path from socketserver import StreamRequestHandler, ThreadingTCPServer from typing import Callable import cloudpickle from typing_extensions import Self -## Handler utilities - - -def add_filter_to_handler( - handler: logging.Handler, - hostname: str, - local_rank: int | None, # None indicates agent - log_level: int = logging.NOTSET, -) -> None: - """Apply an agent- or worker- specific filter to :obj:`logging.Handler`.""" - - def _filter(record: WorkerLogRecord) -> bool: - return ( - record.hostname == hostname - and record.local_rank == local_rank - and record.levelno >= 
log_level - ) - - handler.addFilter(_filter) # pyright: ignore [reportArgumentType] - - -def default_handlers(hostnames: list[str], workers_per_host: list[int]) -> list[logging.Handler]: - """Constructs default :obj:`logging.Handler` objects. - - Logs for the rank 0 agent and worker are written to launcher process stdout. - Logs for all hosts/workers are written to files in ``$TORCHRUNX_LOG_DIR`` (named by timestamp, - hostname, local_rank). - """ - log_dir = Path(os.environ.get("TORCHRUNX_LOG_DIR", "torchrunx_logs")) - - file_log_level = os.environ.get("TORCHRUNX_LOG_LEVEL", "INFO") - if file_log_level.isdigit(): - file_log_level = int(file_log_level) - elif file_log_level in logging._nameToLevel: # noqa: SLF001 - file_log_level = logging._nameToLevel[file_log_level] # noqa: SLF001 - else: - msg = ( - f"Invalid value for $TORCHRUNX_LOG_LEVEL: {file_log_level}. " - f"Should be a positive integer or any of: {', '.join(logging._nameToLevel.keys())}." # noqa: SLF001 - ) - raise ValueError(msg) - - return [ - RedirectHandler(hostname=hostnames[0], local_rank=None), - RedirectHandler(hostname=hostnames[0], local_rank=0), - *file_handlers(hostnames, workers_per_host, log_dir=log_dir, log_level=file_log_level), - ] - - -class RedirectHandler(logging.Handler): - def __init__(self, hostname: str, local_rank: int | None) -> None: - super().__init__() - add_filter_to_handler(self, hostname=hostname, local_rank=local_rank) - - def emit(self, record: LogRecord) -> None: - logger = logging.getLogger(record.name) - if logger.isEnabledFor(record.levelno): - logger.handle(record) - - -def stream_handler( - hostname: str, local_rank: int | None, log_level: int = logging.NOTSET -) -> logging.Handler: - """Handler builder function for writing logs from specified hostname/rank to stdout.""" - handler = logging.StreamHandler(stream=sys.stdout) - add_filter_to_handler(handler, hostname, local_rank, log_level=log_level) - return handler - - -def file_handler( - hostname: str, - 
local_rank: int | None, - file_path: str | os.PathLike, - log_level: int = logging.NOTSET, -) -> logging.Handler: - """Handler builder function for writing logs from specified hostname/rank to a file.""" - handler = logging.FileHandler(file_path) - add_filter_to_handler(handler, hostname, local_rank, log_level=log_level) - handler.setFormatter( - logging.Formatter("%(asctime)s:%(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") - ) - return handler - - -def file_handlers( - hostnames: list[str], - workers_per_host: list[int], - log_dir: str | os.PathLike = Path("torchrunx_logs"), - log_level: int = logging.NOTSET, -) -> list[logging.Handler]: - """Handler builder function for writing logs for all workers/agents to a directory. - - Files are named with hostname and the local_rank (for workers). - """ - handlers = [] - - timestamp = datetime.datetime.now().isoformat(timespec="seconds") - log_dir = Path(log_dir) / timestamp - log_dir.mkdir(parents=True, exist_ok=True) - - for hostname, num_workers in zip(hostnames, workers_per_host): - for local_rank in [None, *range(num_workers)]: - local_rank_str = f"[{local_rank}]" if local_rank is not None else "" - file_path = log_dir / f"{hostname}{local_rank_str}.log" - handlers.append(file_handler(hostname, local_rank, file_path, log_level=log_level)) - - return handlers - - ## Launcher utilities diff --git a/src/torchrunx/worker.py b/src/torchrunx/worker.py index a647a095..eb6358e3 100644 --- a/src/torchrunx/worker.py +++ b/src/torchrunx/worker.py @@ -15,7 +15,7 @@ from typing_extensions import Self from .utils.errors import ExceptionFromWorker -from .utils.logs import log_records_to_socket, redirect_stdio_to_logger +from .utils.log_streaming import log_records_to_socket, redirect_stdio_to_logger __all__ = ["WorkerArgs", "worker_entrypoint"]