Send patches - preferably formatted by git format-patch - to patches at archlinux32 dot org.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnton Hvornum <anton.feeds+github@gmail.com>2020-07-06 21:02:03 +0000
committerGitHub <noreply@github.com>2020-07-06 21:02:03 +0000
commit0c57871f3b3832e31c0c835a671064364c2e9575 (patch)
tree5fe159d12713f48af46d4c339b265f24d38f5858
parent067b31b1aa7f0111f164dfda486adbe565d6a1a9 (diff)
parentf840f9e714d9f7d2fcb601c46079978f7a20df71 (diff)
Merge pull request #39 from Torxed/annotations
v2.0.1
-rw-r--r--.gitignore6
-rw-r--r--LICENSE674
-rw-r--r--LICENSE.md25
-rw-r--r--README.md142
-rw-r--r--archinstall.py1499
-rw-r--r--archinstall/__init__.py7
-rw-r--r--archinstall/lib/__init__.py0
-rw-r--r--archinstall/lib/disk.py210
-rw-r--r--archinstall/lib/exceptions.py6
-rw-r--r--archinstall/lib/general.py204
-rw-r--r--archinstall/lib/installer.py107
-rw-r--r--archinstall/lib/luks.py53
-rw-r--r--archinstall/lib/profiles.py195
-rw-r--r--archinstall/lib/user_interaction.py17
-rw-r--r--deployments/applications/awesome.json15
-rw-r--r--docs/description.jpg (renamed from description.jpg)bin96324 -> 96324 bytes
-rw-r--r--docs/logo.png (renamed from logo.png)bin44691 -> 44691 bytes
-rw-r--r--docs/logo.psd (renamed from logo.psd)bin603666 -> 603666 bytes
-rw-r--r--examples/main_example.py32
-rw-r--r--profiles/00:01:23:45:67:89.json (renamed from deployments/00:01:23:45:67:89.json)0
-rw-r--r--profiles/00:11:22:33:44:55.json (renamed from deployments/00:11:22:33:44:55.json)0
-rw-r--r--profiles/38:00:25:5a:ed:d5.json (renamed from deployments/38:00:25:5a:ed:d5.json)0
-rw-r--r--profiles/applications/awesome.json17
-rw-r--r--profiles/applications/gnome.json (renamed from deployments/applications/gnome.json)0
-rw-r--r--profiles/applications/kde.json (renamed from deployments/applications/kde.json)0
-rw-r--r--profiles/applications/postgresql.json (renamed from deployments/applications/postgresql.json)0
-rw-r--r--profiles/default.json (renamed from deployments/default.json)0
-rw-r--r--profiles/desktop_gnome.json (renamed from deployments/desktop_gnome.json)0
-rw-r--r--profiles/desktop_kde.json (renamed from deployments/desktop_kde.json)0
-rw-r--r--profiles/dns_server.json (renamed from deployments/dns_server.json)0
-rw-r--r--profiles/gitea.json (renamed from deployments/gitea.json)0
-rw-r--r--profiles/local_mirror.json (renamed from deployments/local_mirror.json)0
-rw-r--r--profiles/minimal_example.json (renamed from deployments/minimal_example.json)0
-rw-r--r--profiles/pentest.json (renamed from deployments/pentest.json)0
-rw-r--r--profiles/router.json (renamed from deployments/router.json)0
-rw-r--r--profiles/ubuntu.json (renamed from deployments/ubuntu.json)0
-rw-r--r--profiles/vmhost.json (renamed from deployments/vmhost.json)0
-rw-r--r--profiles/webserver.json (renamed from deployments/webserver.json)0
-rw-r--r--profiles/workstation.json (renamed from deployments/workstation.json)0
-rw-r--r--profiles/workstation_aur.json (renamed from deployments/workstation_aur.json)0
-rw-r--r--profiles/workstation_unattended.json (renamed from deployments/workstation_unattended.json)0
-rw-r--r--setup.py22
-rw-r--r--test_archinstall.py14
43 files changed, 1601 insertions, 1644 deletions
diff --git a/.gitignore b/.gitignore
index 45939095..fac1c89b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,8 @@
**/**__pycache__
SAFETY_LOCK
+**/**old.*
+**/**.img
+**/**pwfile
+**/**build
+**/**dist
+**/**.egg* \ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/LICENSE.md b/LICENSE.md
deleted file mode 100644
index 81fc4a1c..00000000
--- a/LICENSE.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
- Copyright (c) 2018-present Anton Hvornum (https://github.com/Torxed).
-
-Permission is hereby granted for non-commercial use, free of charge, to whomever (including
-non-profit organizations not owned by commercial entities) obtainins a copy of this software
-and associated documentation files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish,
-distribute copies of the Software for non-commercial usecases, and to permit persons
-to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software and are subject to:
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-For commercial purposes, contact the creator via any of the contact options listed under the following:
- * https://github.com/Torxed
- * https://hvornum.se/
diff --git a/README.md b/README.md
index d6dba190..8d5e0853 100644
--- a/README.md
+++ b/README.md
@@ -1,125 +1,85 @@
-# <img src="logo.png" alt="drawing" width="200"/>
-A guided/automated [Arch Linux](https://wiki.archlinux.org/index.php/Arch_Linux) installer.
+# <img src="https://github.com/Torxed/archinstall/raw/annotations/docs/logo.png" alt="drawing" width="200"/>
+Just another guided/automated [Arch Linux](https://wiki.archlinux.org/index.php/Arch_Linux) installer with a twist.
+The installer also doubles as a python library to access each individual installation step for customized installs.
+Pre-built ISO's can be found here which autostarts archinstall *(in a safe guided mode)*: https://hvornum.se/archiso/
* archinstall [discord](https://discord.gg/cqXU88y) server
* archinstall guided install ISO's: https://hvornum.se/archiso/
+ * archinstall on [#archinstall@freenode (IRC)](irc://#archinstall@FreeNode)
-# How-to / Usecases
+# Usage
## Run on Live-CD (Binary)
# wget https://gzip.app/archinstall
# chmod +x archinstall; ./archinstall
-## Run on Live-CD (Python):
+This downloads and runs a "compiled" *(using nuitka)* version of the project.<br>
+It defaults to starting a guided install with some safety checks in place.
- # wget https://raw.githubusercontent.com/Torxed/archinstall/master/archinstall.py
- # pacman -S --noconfirm python; python archinstall.py
+## Run on Live-CD with Python:
-This will start a guided install.<br>
-Add `--default` for a unattended minimalistic installation of Arch Linux.
+ # wget https://raw.githubusercontent.com/Torxed/archinstall/master/installer.py
+ # pacman -S --noconfirm python; python install.py
-> **Creating your own ISO:** Follow [ArchISO](https://wiki.archlinux.org/index.php/archiso)'s guide on how to create your own ISO or use a pre-built [guided ISO](https://hvornum.se/archiso/) to skip the python installation step, or to create auto-installing ISO templates. Further down are examples and cheat sheets on how to create different live ISO's.
-
-# Features
-
- * User guided install of Arch Linux *(Like most other distros have)*
- * `AUR` package support.
- * Unattended install of Arch Linux
- * Profile / Template based installs
- * Full disk encryption, locale/region settings and customizable application selection
- * YubiKey support for disk and root password *(TBD / next release)*
- * <strike>Supports offline-installation of Arch Linux</strike>
- * Never creates or leave post-install/service scripts *(usually used to finalize databases etc)*
-
-**Default Installation Contains:** Encrypts drive, btrfs filesystem, `linux` kernel, nano, wpa_supplicant *(and dialog)*
-
-# Examples:
+This will start a guided install with the same safety checks as previous.<br>
- * `./archinstall --profile=workstation --drive=/dev/sda` - Installs the [workstation](https://github.com/Torxed/archinstall/blob/master/deployments/workstation.json) template on the drive `/dev/sda`
+## Run using PIP and Python module:
-# [Build a Arch Linux ISO to autorun archinstall](https://github.com/Torxed/archinstall/wiki/Autorun-on-Arch-Live-CD)
+ # pip install archinstall
+ # python -m archinstall
-More options for the built ISO:
+Again, a guided install starts with safety checks.<br>
+This assumes tho that Python and Pip is present (not always the case on the default Arch Linux ISO), see above for pre-built ISO's containing Python+pip
-### [Unattended install of a profile](https://github.com/Torxed/archinstall/wiki/Unattended-install-of-a-profile)
+## Scripting an installation
-### [User guided install (DEFAULT)](https://github.com/Torxed/archinstall/wiki/User-guided-installation-(DEFAULT))
+Assuming you're building your own ISO and want to create an automated install process.<br>
+This is probably what you'll need, a minimal example of how to install using archinstall as a Python library.
-### [Custom web-server for deployment profiles](https://github.com/Torxed/archinstall/wiki/Custom-web-server-for-deployment-profiles)
+```python
+import archinstall, getpass
-### [Rerunning the installation](https://github.com/Torxed/archinstall/wiki/Rerunning-the-installation)
-# Some parameters you can give it
+hdd = archinstall.select_disk(archinstall.all_disks())
+disk_password = getpass.getpass(prompt='Disk password (won\'t echo): ')
- --drive=</dev/sdX>
- Which drive to install arch on, if absent, the first disk under /dev/ is used
+with archinstall.Filesystem(hdd, archinstall.GPT) as fs:
+ fs.use_entire_disk('luks2')
+ with archinstall.Luks2(fs) as crypt:
+ if hdd.partition[1]['size'] == '512M':
+ raise OSError('Trying to encrypt the boot partition for petes sake..')
- --minimal
- Starts a minimal installation, and skips looking for profiles.
-
- --size=100% (Default)
- Sets the size of the root filesystem (btrfs)
-
- --start=513MiB (Default)
- Sets the starting location of the root partition
- (TODO: /boot will take up space from 1MiB - <start>, make sure boot is no larger than 513MiB)
-
- --password=0000 (Default)
- Which disk password to use,
- --password="<STDIN>" for prompt of password
- --password="<YUBIKEY>" for setting a unique password on the YubiKey and use that as a password
- (NOTE: This will wipe/replace slot 1 on the YubiKey)
+ key_file = crypt.encrypt(hdd.partition[1], password=disk_password, key_size=512, hash_type='sha512', iter_time=10000, key_file='./pwfile')
+ unlocked_crypt_vol = crypt.mount(hdd.partition[1], 'luksloop', key_file)
- --aur-support (default)
+ with archinstall.Installer(unlocked_crypt_vol, hostname='testmachine') as installation:
+ if installation.minimal_installation():
+ installation.add_bootloader()
- --pwfile=/tmp/diskpw (Default)
- Which file to store the disk encryption password while sending it to cryptsetup
-
- --hostname=Arcinstall (Default)
- Sets the hostname of the box
-
- --country=all (Default)
- Default mirror allocation for fetching packages.
- If network is found, archinstall will try to attempt and guess which country the
- install originates from, basing it off GeoIP off your public IP (uses https://hvornu.se/ip/ for lookups)
-
- --packages='' (Default)
- Which additional packages to install, defaults to none.
- (Space separated as it's passed unchanged to `pacstrap`
-
- --user=<name>
- Adds an additional username to the system (default group Wheel)
-
- --post=reboot (Default)
- After a successful install, reboots into the system. Use --post=stay to not reboot.
+ installation.add_additional_packages(['nano', 'wget', 'git'])
+ installation.install_profile('desktop')
- --unattended
- This parameter causes the installation script to install arch unattended on the first disk
+ installation.user_create('anton', 'test')
+ installation.user_set_pw('root', 'toor')
- --profile=<name>
- For instance, --profile=workstation will install the workstation profile.
+ installation.add_AUR_support()
+```
- --profiles-path=https://example.com/profiles
- Changes the default path the script looks for deployment profiles.
- The default path is 'https://raw.githubusercontent.com/Torxed/archinstall/master/deployments'
+This installer will perform the following:
- --rerun="Name of step in profile"
- Enables you to skip the format, encryption and base install steps.
- And head straight for a step in the profile specified.
- (Useful for debugging a step in your profile)
+ * Prompt the user to select a disk and disk-password
+ * Proceed to wipe said disk
+ * Sets up a default 100% used disk with encryption
+ * Installs a basic instance of Arch Linux *(base base-devel linux linux-firmware btrfs-progs efibootmgr)*
+ * Installs and configures a bootloader
+ * Install additional packages *(nano, wget, git)*
+ * Installs a network-profile called `desktop` *(more on network profiles in the docs)*
+ * Adds AUR support by compiling and installing [yay](https://github.com/Jguer/yay)
- --localtime="Europe/Stockholm" (Default if --country=SE, otherwise GMT+0)
- Specify a localtime you're used to.
-
-Deployment profile structs support all the above parameters and more, for instance, custom arguments with string formatting.
-See [deployments/workstation.json](https://github.com/Torxed/archinstall/blob/net-deploy/deployments/workstation.json) for examples.
-
-# Contact
-
-IRC: `#archinstall@FreeNode`
+> **Creating your own ISO:** Follow [ArchISO](https://wiki.archlinux.org/index.php/archiso)'s guide on how to create your own ISO or use a pre-built [guided ISO](https://hvornum.se/archiso/) to skip the python installation step, or to create auto-installing ISO templates. Further down are examples and cheat sheets on how to create different live ISO's.
## End note
- ![description](description.jpg)
+![description](https://github.com/Torxed/archinstall/raw/annotations/docs/description.jpg) \ No newline at end of file
diff --git a/archinstall.py b/archinstall.py
deleted file mode 100644
index 086585fb..00000000
--- a/archinstall.py
+++ /dev/null
@@ -1,1499 +0,0 @@
-#!/usr/bin/python3
-import traceback
-import os, re, struct, sys, json, pty, shlex
-import urllib.request, urllib.parse, ssl, signal
-import time
-from glob import glob
-from select import epoll, EPOLLIN, EPOLLHUP
-from socket import socket, inet_ntoa, AF_INET, AF_INET6, AF_PACKET
-from collections import OrderedDict as oDict
-from subprocess import Popen, STDOUT, PIPE, check_output
-from random import choice
-from string import ascii_uppercase, ascii_lowercase, digits
-from hashlib import sha512
-from threading import Thread, enumerate as tenum
-
-if os.path.isfile('./SAFETY_LOCK'):
- SAFETY_LOCK = True
-else:
- SAFETY_LOCK = False
-
-profiles_path = 'https://raw.githubusercontent.com/Torxed/archinstall/master/deployments'
-rootdir_pattern = re.compile('^.*?/devices')
-harddrives = oDict()
-commandlog = []
-worker_history = oDict()
-instructions = oDict()
-args = {}
-
-create_log = True
-
-try:
- if 'log' in __builtins__.__dict__:
- create_log = False
-except:
- if 'log' in __builtins__:
- create_log = False
-
-if create_log:
- import logging
- from systemd.journal import JournalHandler
-
- # Custom adapter to pre-pend the 'origin' key.
- # TODO: Should probably use filters: https://docs.python.org/3/howto/logging-cookbook.html#using-filters-to-impart-contextual-information
- class CustomAdapter(logging.LoggerAdapter):
- def process(self, msg, kwargs):
- return '[{}] {}'.format(self.extra['origin'], msg), kwargs
-
- logger = logging.getLogger() # __name__
- journald_handler = JournalHandler()
- journald_handler.setFormatter(logging.Formatter('[{levelname}] {message}', style='{'))
- logger.addHandler(journald_handler)
- logger.setLevel(logging.DEBUG)
-
- class LOG_LEVELS:
- CRITICAL = 1
- ERROR = 2
- WARNING = 3
- INFO = 4
- DEBUG = 5
-
- LOG_LEVEL = 4
- def log(*msg, origin='UNKNOWN', level=5, **kwargs):
- if level <= LOG_LEVEL:
- msg = [item.decode('UTF-8', errors='backslashreplace') if type(item) == bytes else item for item in msg]
- msg = [str(item) if type(item) != str else item for item in msg]
- log_adapter = CustomAdapter(logger, {'origin': origin})
- if level <= 1:
- log_adapter.critical(' '.join(msg))
- elif level <= 2:
- log_adapter.error(' '.join(msg))
- elif level <= 3:
- log_adapter.warning(' '.join(msg))
- elif level <= 4:
- log_adapter.info(' '.join(msg))
- else:
- log_adapter.debug(' '.join(msg))
-
-## == Profiles Path can be set via --profiles-path=/path
-## This just sets the default path if the parameter is omitted.
-try:
- import psutil
-except:
- ## Time to monkey patch in all the stats and psutil fuctions if it isn't installed.
-
- class mem():
- def __init__(self, free, percent=-1):
- self.free = free
- self.percent = percent
-
- class disk():
- def __init__(self, size, free, percent):
- self.total = size
- self.used = 0
- self.free = free
- self.percent = percent
-
- class iostat():
- def __init__(self, interface, bytes_sent=0, bytes_recv=0):
- self.interface = interface
- self.bytes_recv = int(bytes_recv)
- self.bytes_sent = int(bytes_sent)
- def __repr__(self, *positionals, **kwargs):
- return f'iostat@{self.interface}[bytes_sent: {self.bytes_sent}, bytes_recv: {self.bytes_recv}]'
-
- class psutil():
- def cpu_percent(interval=0):
- ## This just counts the ammount of time the CPU has spent. Find a better way!
- with cmd("grep 'cpu ' /proc/stat | awk '{usage=($2+$4)*100/($2+$4+$5)} END {print usage}'") as output:
- for line in output:
- return float(line.strip().decode('UTF-8'))
-
- def virtual_memory():
- with cmd("grep 'MemFree: ' /proc/meminfo | awk '{free=($2)} END {print free}'") as output:
- for line in output:
- return mem(float(line.strip().decode('UTF-8')))
-
- def disk_usage(partition):
- disk_stats = os.statvfs(partition)
- free_size = disk_stats.f_bfree * disk_stats.f_bsize
- disk_size = disk_stats.f_blocks * disk_stats.f_bsize
- percent = (100/disk_size)*free_size
- return disk(disk_size, free_size, percent)
-
- def net_if_addrs():
- interfaces = {}
- for root, folders, files in os.walk('/sys/class/net/'):
- for name in folders:
- interfaces[name] = {}
- return interfaces
-
- def net_io_counters(pernic=True):
- data = {}
- for interface in psutil.net_if_addrs().keys():
- with cmd("grep '{interface}:' /proc/net/dev | awk '{{recv=$2}}{{send=$10}} END {{print send,recv}}'".format(interface=interface)) as output:
- for line in output:
- data[interface] = iostat(interface, *line.strip().decode('UTF-8').split(' ',1))
- return data
-
-
-## FIXME: dependency checks (fdisk, lsblk etc)
-def sig_handler(signal, frame):
- print('\nAborting further installation steps!')
- print(' Here\'s a summary of the commandline:')
- print(f' {sys.argv}')
-
- exit(0)
-signal.signal(signal.SIGINT, sig_handler)
-
-def gen_uid(entropy_length=256):
- return sha512(os.urandom(entropy_length)).hexdigest()
-
-def get_default_gateway_linux(*positionals, **kwargs):
- """Read the default gateway directly from /proc."""
- with open("/proc/net/route") as fh:
- for line in fh:
- fields = line.strip().split()
- if fields[1] != '00000000' or not int(fields[3], 16) & 2:
- continue
-
- return inet_ntoa(struct.pack("<L", int(fields[2], 16)))
-
-def get_local_MACs():
- macs = {}
- for nic, opts in psutil.net_if_addrs().items():
- for addr in opts:
- #if addr.family in (AF_INET, AF_INET6) and addr.address:
- if addr.family == AF_PACKET: # MAC
- macs[addr.address] = nic
- return macs
-
-def gen_yubikey_password():
- return None #TODO: Implement
-
-def pid_exists(pid):
- """Check whether pid exists in the current process table."""
- if pid < 0:
- return False
- try:
- os.kill(pid, 0)
- except (OSError, e):
- return e.errno == errno.EPERMRM
- else:
- return True
-
-def simple_command(cmd, opts=None, *positionals, **kwargs):
- if not opts: opts = {}
- if 'debug' in opts:
- print('[!] {}'.format(cmd))
- handle = Popen(cmd, shell='True', stdout=PIPE, stderr=STDOUT, stdin=PIPE)
- output = b''
- while handle.poll() is None:
- data = handle.stdout.read()
- if len(data):
- if 'debug' in opts:
- print(data.decode('UTF-8'), end='')
- # print(data.decode('UTF-8'), end='')
- output += data
- data = handle.stdout.read()
- if 'debug' in opts:
- print(data.decode('UTF-8'), end='')
- output += data
- handle.stdin.close()
- handle.stdout.close()
- return output
-
-class sys_command():#Thread):
- def __init__(self, cmd, callback=None, start_callback=None, *positionals, **kwargs):
- if not 'worker_id' in kwargs: kwargs['worker_id'] = gen_uid()
- if not 'emulate' in kwargs: kwargs['emulate'] = SAFETY_LOCK
- #Thread.__init__(self)
- if kwargs['emulate']:
- print(f"Starting command '{cmd}' in emulation mode.")
- self.cmd = shlex.split(cmd)
- self.args = args
- self.kwargs = kwargs
- if not 'worker' in self.kwargs: self.kwargs['worker'] = None
- self.callback = callback
- self.pid = None
- self.exit_code = None
- self.started = time.time()
- self.ended = None
- self.worker_id = kwargs['worker_id']
- self.trace_log = b''
- self.status = 'starting'
-
- user_catalogue = os.path.expanduser('~')
- self.cwd = f"{user_catalogue}/archinstall/cache/workers/{kwargs['worker_id']}/"
- self.exec_dir = f'{self.cwd}/{os.path.basename(self.cmd[0])}_workingdir'
-
- if not self.cmd[0][0] == '/':
- log('Worker command is not executed with absolute path, trying to find: {}'.format(self.cmd[0]), origin='spawn', level=5)
- o = check_output(['/usr/bin/which', self.cmd[0]])
- log('This is the binary {} for {}'.format(o.decode('UTF-8'), self.cmd[0]), origin='spawn', level=5)
- self.cmd[0] = o.decode('UTF-8').strip()
-
- if not os.path.isdir(self.exec_dir):
- os.makedirs(self.exec_dir)
-
- if self.kwargs['emulate']:
- commandlog.append(cmd + ' # (emulated)')
- elif 'hide_from_log' in self.kwargs and self.kwargs['hide_from_log']:
- pass
- else:
- commandlog.append(cmd)
- if start_callback: start_callback(self, *positionals, **kwargs)
- #self.start()
- self.run()
-
- def __iter__(self, *positionals, **kwargs):
- for line in self.trace_log.split(b'\n'):
- yield line
-
- def __repr__(self, *positionals, **kwargs):
- return f"{self.cmd, self.trace_log}"
-
- def decode(self, fmt='UTF-8'):
- return self.trace_log.decode(fmt)
-
- def dump(self):
- return {
- 'status' : self.status,
- 'worker_id' : self.worker_id,
- 'worker_result' : self.trace_log.decode('UTF-8'),
- 'started' : self.started,
- 'ended' : self.ended,
- 'started_pprint' : '{}-{}-{} {}:{}:{}'.format(*time.localtime(self.started)),
- 'ended_pprint' : '{}-{}-{} {}:{}:{}'.format(*time.localtime(self.ended)) if self.ended else None,
- 'exit_code' : self.exit_code
- }
-
- def run(self):
- #main = None
- #for t in tenum():
- # if t.name == 'MainThread':
- # main = t
- # break
-
- #if not main:
- # print('Main thread not existing')
- # return
-
- self.status = 'running'
- old_dir = os.getcwd()
- os.chdir(self.exec_dir)
- self.pid, child_fd = pty.fork()
- if not self.pid: # Child process
- # Replace child process with our main process
- if not self.kwargs['emulate']:
- try:
- os.execv(self.cmd[0], self.cmd)
- except FileNotFoundError:
- self.status = 'done'
- log(f"{self.cmd[0]} does not exist.", origin='spawn', level=2)
- self.exit_code = 1
- return False
-
- os.chdir(old_dir)
-
- poller = epoll()
- poller.register(child_fd, EPOLLIN | EPOLLHUP)
-
- if 'events' in self.kwargs and 'debug' in self.kwargs:
- print(f'[D] Using triggers for command: {self.cmd}')
- print(json.dumps(self.kwargs['events']))
-
- alive = True
- last_trigger_pos = 0
- while alive and not self.kwargs['emulate']:
- for fileno, event in poller.poll(0.1):
- try:
- output = os.read(child_fd, 8192).strip()
- self.trace_log += output
- except OSError:
- alive = False
- break
-
- if 'debug' in self.kwargs and self.kwargs['debug'] and len(output):
- print(self.cmd[0], 'gave:', output.decode('UTF-8'))
- log(self.cmd[0],'gave:', output.decode('UTF-8'), origin='spawn', level=4)
-
- if 'on_output' in self.kwargs:
- self.kwargs['on_output'](self.kwargs['worker'], output)
-
- lower = output.lower()
- broke = False
- if 'events' in self.kwargs:
- for trigger in list(self.kwargs['events']):
- if type(trigger) != bytes:
- original = trigger
- trigger = bytes(original, 'UTF-8')
- self.kwargs['events'][trigger] = self.kwargs['events'][original]
- del(self.kwargs['events'][original])
- if type(self.kwargs['events'][trigger]) != bytes:
- self.kwargs['events'][trigger] = bytes(self.kwargs['events'][trigger], 'UTF-8')
-
- if trigger.lower() in self.trace_log[last_trigger_pos:].lower():
- trigger_pos = self.trace_log[last_trigger_pos:].lower().find(trigger.lower())
-
- if 'debug' in self.kwargs and self.kwargs['debug']:
- print(f"Writing to subprocess {self.cmd[0]}: {self.kwargs['events'][trigger].decode('UTF-8')}")
- log(f"Writing to subprocess {self.cmd[0]}: {self.kwargs['events'][trigger].decode('UTF-8')}", origin='spawn', level=5)
-
- last_trigger_pos = trigger_pos
- os.write(child_fd, self.kwargs['events'][trigger])
- del(self.kwargs['events'][trigger])
- broke = True
- break
-
- if broke:
- continue
-
- ## Adding a exit trigger:
- if len(self.kwargs['events']) == 0:
- if 'debug' in self.kwargs and self.kwargs['debug']:
- log(f"Waiting for last command {self.cmd[0]} to finish.", origin='spawn', level=4)
-
- if bytes(f']$'.lower(), 'UTF-8') in self.trace_log[0-len(f']$')-5:].lower():
- if 'debug' in self.kwargs and self.kwargs['debug']:
- log(f"{self.cmd[0]} has finished.", origin='spawn', level=4)
- alive = False
- break
-
- self.status = 'done'
-
- if 'debug' in self.kwargs and self.kwargs['debug']:
- log(f"{self.cmd[0]} waiting for exit code.", origin='spawn', level=5)
-
- if not self.kwargs['emulate']:
- try:
- self.exit_code = os.waitpid(self.pid, 0)[1]
- except ChildProcessError:
- try:
- self.exit_code = os.waitpid(child_fd, 0)[1]
- except ChildProcessError:
- self.exit_code = 1
- else:
- self.exit_code = 0
-
- if 'ignore_errors' in self.kwargs:
- self.exit_code = 0
-
- if self.exit_code != 0:
- log(f"{self.cmd} did not exit gracefully, exit code {self.exit_code}.", origin='spawn', level=3)
- log(self.trace_log.decode('UTF-8'), origin='spawn', level=3)
- else:
- log(f"{self.cmd[0]} exit nicely.", origin='spawn', level=5)
-
- self.ended = time.time()
- with open(f'{self.cwd}/trace.log', 'wb') as fh:
- fh.write(self.trace_log)
-
- worker_history[self.worker_id] = self.dump()
-
- if 'dependency' in self.kwargs:
- pass # TODO: Not yet supported (steal it from archinstall_gui)
- """
- dependency = self.kwargs['dependency']
- if type(dependency) == str:
- # Dependency is a progress-string. Wait for it to show up.
- while main and main.isAlive() and dependency not in progress or progress[dependency] is None:
- time.sleep(0.25)
- dependency = progress[dependency]
-
- if type(dependency) == str:
- log(f"{self.func} waited for progress {dependency} which never showed up. Aborting.", level=2, origin='worker', function='run')
- self.ended = time.time()
- self.status = 'aborted'
- return None
-
- while main and main.isAlive() and dependency.ended is None:
- time.sleep(0.25)
-
- print(' *** Dependency released for:', self.func)
-
- if dependency.data is None or not main or not main.isAlive():
- log('Dependency:', dependency.func, 'did not exit clearly. There for,', self.func, 'can not execute.', level=2, origin='worker', function='run')
- self.ended = time.time()
- self.status = 'aborted'
- return None
- """
-
- if self.callback:
- pass # TODO: Not yet supported (steal it from archinstall_gui)
-
- #self.callback(self, *self.args, **self.kwargs)
-
-def get_drive_from_uuid(drive):
- if len(harddrives) <= 0: raise ValueError("No hard drives to iterate in order to find: {}".format(uuid))
-
- for drive in harddrives:
- #for partition in psutil.disk_partitions('/dev/{}'.format(name)):
- # pass #blkid -s PARTUUID -o value /dev/sda2
- o = simple_command(f'blkid -s PTUUID -o value /dev/{drive}')
- if len(o) and o == uuid:
- return drive
-
- return None
-
-def get_drive_from_part_uuid(partuuid, *positionals, **kwargs):
- if len(harddrives) <= 0: raise ValueError("No hard drives to iterate in order to find: {}".format(uuid))
-
- for drive in harddrives:
- for partition in get_partitions(f'/dev/{drive}', *positionals, **kwargs):
- o = simple_command(f'blkid -s PARTUUID -o value /dev/{drive}{partition}')
- if len(o) and o == partuuid:
- return drive
-
- return None
-
-def set_password(user, password, *positionals, **kwargs):
- if not SAFETY_LOCK:
- o = simple_command("/usr/bin/arch-chroot /mnt sh -c \"echo 'root:{pin}' | chpasswd\"".format(**args, pin=args['password']))
- return True
-
def update_git(branch='master'):
	"""Self-update the installer from its git checkout and re-exec the script.

	Only runs when a default gateway exists. After a successful pull (or a
	branch switch when *branch* != 'master') it re-executes itself via
	os.execv; the '--rebooted' flag in ``args`` guards against re-exec loops.
	"""
	default_gw = get_default_gateway_linux()
	if default_gw:
		print('[N] Checking for updates...')
		## Not the most elegant way to make sure git conflicts doesn't occur (yea fml)
		if os.path.isfile('/root/archinstall/archinstall.py'):
			os.remove('/root/archinstall/archinstall.py')
		if os.path.isfile('/root/archinstall/README.md'):
			os.remove('/root/archinstall/README.md')

		output = simple_command('(cd /root/archinstall; git reset --hard origin/$(git branch | grep "*" | cut -d\' \' -f 2); git pull)')

		if b'error:' in output:
			print('[N] Could not update git source for some reason.')
			return

		# Example output:
		# b'From github.com:Torxed/archinstall\n 339d687..80b97f3 master -> origin/master\nUpdating ...\nFast-forward\n README.md | 2 +-\n ...'
		if output != b'Already up to date' or branch != 'master':
			if branch != 'master':
				on_branch = simple_command('(cd /root/archinstall; git branch | grep "*" | cut -d\' \' -f 2)').decode('UTF-8').strip()
				if on_branch.lower() != branch.lower():
					print(f'[N] Changing branch from {on_branch} to {branch}')
					output = simple_command(f'(cd /root/archinstall; git checkout {branch}; git pull)')
					print('[N] Rebooting the new branch')
					if not 'rebooted' in args:
						os.execv('/usr/bin/python3', ['archinstall.py'] + sys.argv + ['--rebooted','--rerun'])
					else:
						os.execv('/usr/bin/python3', ['archinstall.py'] + sys.argv + ['--rerun',])

			if not 'rebooted' in args:
				## Reboot the script (in same context)
				print('[N] Rebooting the script')
				os.execv('/usr/bin/python3', ['archinstall.py'] + sys.argv + ['--rebooted',])
				# Fix: was `extit(1)` - a NameError typo for exit(1).
				# (os.execv never returns on success, so this is a safety net.)
				exit(1)
-
def device_state(name, *positionals, **kwargs):
	"""Return True when block device *name* is a fixed disk.

	Returns None (falsy) when sysfs marks the device as removable, or when
	its device link matches a hot-pluggable bus (usb, firewire, ...).
	"""
	# Based out of: https://askubuntu.com/questions/528690/how-to-get-list-of-all-non-removable-disk-device-names-ssd-hdd-and-sata-ide-onl/528709#528709
	removable_flag = '/sys/block/{}/device/block/{}/removable'.format(name, name)
	if os.path.isfile(removable_flag):
		with open(removable_flag) as fh:
			if fh.read(1) == '1':
				return

	path = rootdir_pattern.sub('', os.readlink('/sys/block/{}'.format(name)))
	for bus in ("usb", "ieee1394", "mmc", "pcmcia", "firewire"):
		bus_path = '/sys/bus/{}'.format(bus)
		if not os.path.exists(bus_path):
			continue
		for device_bus in os.listdir('{}/devices'.format(bus_path)):
			device_link = rootdir_pattern.sub('', os.readlink('{}/devices/{}'.format(bus_path, device_bus)))
			if re.search(device_link, path):
				return
	return True
-
def get_partitions(dev, *positionals, **kwargs):
	"""Return an oDict mapping partition suffix -> {'size': ...} for device *dev*.

	Returns {} when *dev* is not a block device; exits the process when lsblk
	produces something that is not JSON.
	"""
	drive_name = os.path.basename(dev)
	parts = oDict()
	output = b''.join(sys_command(f'/usr/bin/lsblk -J {dev}', hide_from_log=True))
	if b'not a block device' in output:
		## TODO: have sys_command() expose the exit-code instead;
		## checking output strings like this is fragile.
		return {}

	if output[:1] != b'{':
		print('[E] Error in getting blk devices:', output)
		exit(1)

	blockdevices = json.loads(output.decode('UTF-8'))['blockdevices']
	if blockdevices and 'children' in blockdevices[0]:
		for child in blockdevices[0]['children']:
			# Key by the partition suffix only (e.g. '1' for sda1 on drive sda).
			parts[child['name'][len(drive_name):]] = {
				'size': child['size']
			}

	return parts
-
def get_disk_model(drive):
	"""Read the hardware model string for *drive* from sysfs."""
	model_path = f'/sys/block/{os.path.basename(drive)}/device/model'
	with open(model_path, 'rb') as fh:
		raw = fh.read()
	return raw.decode('UTF-8').strip()
-
def get_disk_size(drive):
	"""Return a human-readable size string for *drive* read from sysfs.

	Fix: the original passed the raw sysfs text straight into
	human_readable_size() (which does arithmetic -> TypeError) and then
	''.join()-ed its (number, unit) tuple (join needs strings).
	NOTE(review): the sysfs 'size' attribute is a sector count, while
	human_readable_size() divides by 8 first as if given bits - the unit
	scale here deserves a second look. TODO confirm.
	"""
	dev_short_name = os.path.basename(drive)
	with open(f'/sys/block/{dev_short_name}/device/block/{dev_short_name}/size', 'rb') as fh:
		size, unit = human_readable_size(int(fh.read().decode('UTF-8').strip()))
	return f'{size}{unit}'
-
def disk_info(drive, *positionals, **kwargs):
	"""Collect lsblk information (name/size/fstype/label) for *drive*.

	Adds 'fileformats', 'labels' and 'model' keys to the lsblk record;
	placeholder values are used when the drive has no partitions yet.
	Always forces emulate=False since this never alters the filesystem.
	"""
	lkwargs = {**kwargs}
	lkwargs['emulate'] = False # This is a emulate-safe function. Does not alter filesystem.

	info = json.loads(b''.join(sys_command(f'lsblk -J -o "NAME,SIZE,FSTYPE,LABEL" {drive}', *positionals, **lkwargs, hide_from_log=True)).decode('UTF_8'))['blockdevices'][0]
	fileformats = []
	labels = []
	if 'children' in info: ## Might not be partitioned yet
		for child in info['children']:
			# Idiom fix: `is not None` instead of `!= None`.
			if child['fstype'] is not None:
				fileformats.append(child['fstype'])
			if child['label'] is not None:
				labels.append(child['label'])
	else:
		fileformats = ['*Empty Drive*']
		labels = ['(no partitions)']
	info['fileformats'] = fileformats
	info['labels'] = labels
	info['model'] = get_disk_model(drive)

	return info
-
def cleanup_args(*positionals, **kwargs):
	"""Resolve placeholder values (<STDIN>, <RND_STR>, <YUBIKEY>) in the global args dict.

	<STDIN> prompts interactively (or via kwargs['input_redirect']), except in
	unattended mode where a random string is substituted. Exits the process
	when a yubikey password cannot be generated.
	"""
	for key in args:
		value = args[key]
		if value == '<STDIN>':
			if args['unattended']:
				args[key] = random_string(32)
			elif 'input_redirect' in kwargs:
				args[key] = kwargs['input_redirect'](key)
			else:
				args[key] = input(f'Enter a value for {key}: ')
		elif value == '<RND_STR>':
			args[key] = random_string(32)
		elif value == '<YUBIKEY>':
			args[key] = gen_yubikey_password()
			if not args[key]:
				print('[E] Failed to setup a yubikey password, is it plugged in?')
				exit(1)
-
def merge_in_includes(instructions, *positionals, **kwargs):
	"""Expand any {'args': {'include': ...}} references in *instructions* and merge them in.

	Side effect: copies the resulting instruction 'args' (plus any
	kwargs['user_args']) into the global ``args`` dict.
	"""
	if 'args' in instructions:
		## == Recursively fetch instructions if "include" is found under {args: ...}
		while 'include' in instructions['args']:
			includes = instructions['args']['include']
			print('[!] Importing net-deploy target: {}'.format(includes))
			del(instructions['args']['include'])
			if type(includes) in (dict, list):
				for include in includes:
					instructions = merge_dicts(instructions, get_instructions(include, *positionals, **kwargs), before=True)
			else:
				# Fix: *positionals/**kwargs were previously passed to
				# merge_dicts() instead of get_instructions(), unlike the
				# branch above and load_automatic_instructions().
				instructions = merge_dicts(instructions, get_instructions(includes, *positionals, **kwargs), before=True)

		## Update arguments if we found any
		for key, val in instructions['args'].items():
			args[key] = val

	if 'args' in instructions:
		## TODO: Reuseable code, there's to many get_instructions, merge_dictgs and args updating going on.
		## Update arguments if we found any
		## (NOTE: this intentionally repeats the loop above - kept as-is.)
		for key, val in instructions['args'].items():
			args[key] = val
		if 'user_args' in kwargs:
			for key, val in kwargs['user_args'].items():
				args[key] = val

	return instructions
-
-
def update_drive_list(*positionals, **kwargs):
	"""Populate the global ``harddrives`` dict with disk_info for every fixed disk."""
	# https://github.com/karelzak/util-linux/blob/f920f73d83f8fd52e7a14ec0385f61fab448b491/disk-utils/fdisk-list.c#L52
	for path in glob('/sys/block/*/device'):
		# Fix: use a raw string for the replacement - '\g<1>' in a normal
		# string literal is an invalid escape sequence (DeprecationWarning
		# today, a SyntaxError in future Python versions).
		name = re.sub('.*/(.*?)/device', r'\g<1>', path)
		if device_state(name, *positionals, **kwargs):
			harddrives[f'/dev/{name}'] = disk_info(f'/dev/{name}', *positionals, **kwargs)
-
def human_readable_size(bits, sizes=({8 : 'b'}, {1024 : 'kb'}, {1024 : 'mb'}, {1024 : 'gb'}, {1024 : 'tb'}, {1024 : 'zb?'})):
	"""Convert a raw bit count into a ``(value, unit)`` tuple.

	Fix: the default *sizes* is now an (immutable) tuple instead of a mutable
	list default argument; the accepted element format is unchanged.

	NOTE: the strict '>' comparison means a value exactly equal to a divisor
	is not scaled further, and the unit is None when *bits* never exceeds the
	first divisor (e.g. bits <= 8).
	"""
	# Not needed if using lsblk.
	end_human = None
	for pair in sizes:
		size, human = list(pair.items())[0]

		if bits / size > 1:
			bits = bits / size
			end_human = human
		else:
			break
	return bits, end_human
-
def human_disk_info(drive):
	"""Summarize the cached entry for *drive* from the global harddrives table."""
	info = harddrives[drive]
	return {
		'size': info['size'],
		'fileformat': info['fileformats'],
		'labels': info['labels'],
	}
-
def close_disks():
	"""Unmount the /mnt hierarchy and close the LUKS mapping (best effort)."""
	for command in ('/usr/bin/umount -R /mnt/boot',
	                '/usr/bin/umount -R /mnt',
	                '/usr/bin/cryptsetup close /dev/mapper/luksdev'):
		simple_command(command)
-
def format_disk(drive='drive', start='start', end='size', emulate=False, *positionals, **kwargs):
	"""Label args[drive] as GPT and create an EFI boot partition plus a main partition.

	The string parameters are keys into the global ``args`` dict, not raw
	values. Returns True on success, None as soon as any parted step fails.
	"""
	drive = args[drive]
	start = args[start]
	end = args[end]
	if not drive:
		raise ValueError('Need to supply a drive path, for instance: /dev/sdx')

	if not SAFETY_LOCK:
		# dd if=/dev/random of=args['drive'] bs=4096 status=progress
		# https://github.com/dcantrell/pyparted would be nice, but isn't officially in the repo's #SadPanda
		parted_steps = (
			f'/usr/bin/parted -s {drive} mklabel gpt',
			f'/usr/bin/parted -s {drive} mkpart primary FAT32 1MiB {start}',
			f'/usr/bin/parted -s {drive} name 1 "EFI"',
			f'/usr/bin/parted -s {drive} set 1 esp on',
			f'/usr/bin/parted -s {drive} set 1 boot on',
			f'/usr/bin/parted -s {drive} mkpart primary {start} {end}',
		)
		for step in parted_steps:
			if sys_command(step, emulate=emulate, *positionals, **kwargs).exit_code != 0:
				return None

	# TODO: grab partitions after each parted/partition step instead of guessing which partiton is which later on.
	# Create one, grab partitions - dub that to "boot" or something. do the next partition, grab that and dub it "system".. or something..
	# This "assumption" has bit me in the ass so many times now I've stoped counting.. Jerker is right.. Don't do it like this :P

	return True
-
def multisplit(s, splitters):
	"""Split *s* on every token in *splitters*, keeping the delimiters in the result.

	Empty fragments are dropped; delimiters appear in the output at the
	positions where they occurred in *s*.
	"""
	parts = [s]
	for token in splitters:
		next_parts = []
		for chunk in parts:
			pieces = chunk.split(token)
			last = len(pieces) - 1
			for index, piece in enumerate(pieces):
				if piece:
					next_parts.append(piece)
				if index < last:
					next_parts.append(token)
		parts = next_parts
	return parts
-
def grab_url_data(path):
	"""Fetch *path* over http(s), percent-encoding each path component, and return raw bytes.

	NOTE(review): certificate verification is disabled below - presumably
	deliberate for live-cd bootstrapping, but worth confirming.
	"""
	scheme_end = path.find(':') + 1
	encoded = ''.join(
		item if item in ('/', '?', '=', '&') else urllib.parse.quote(item)
		for item in multisplit(path[scheme_end:], ('/', '?', '=', '&'))
	)
	safe_path = path[:scheme_end] + encoded
	ssl_context = ssl.create_default_context()
	ssl_context.check_hostname = False
	ssl_context.verify_mode = ssl.CERT_NONE
	response = urllib.request.urlopen(safe_path, context=ssl_context)
	return response.read()
-
def get_application_instructions(target):
	"""Fetch application deployment instructions for *target*, remote first then local.

	Returns a parsed oDict, or the initial {} / raw empty value when nothing
	was found. Exits the process on JSON syntax errors.
	"""
	instructions = {}
	try:
		instructions = grab_url_data('{}/applications/{}.json'.format(args['profiles-path'], target)).decode('UTF-8')
		print('[N] Found application instructions for: {}'.format(target))
	except urllib.error.HTTPError:
		# Fix: corrected the 'yrying' typo in this user-facing message.
		print('[N] Could not find remote instructions. Trying local instructions under ./deployments/applications')
		local_path = './deployments/applications' if os.path.isfile('./archinstall.py') else './archinstall/deployments/applications' # Dangerous assumption
		if os.path.isfile(f'{local_path}/{target}.json'):
			with open(f'{local_path}/{target}.json', 'r') as fh:
				instructions = fh.read()

			print('[N] Found local application instructions for: {}'.format(target))
		else:
			print('[N] No instructions found for: {}'.format(target))
			return instructions

	try:
		instructions = json.loads(instructions, object_pairs_hook=oDict)
	except ValueError:
		# Fix: narrowed the bare `except:` - json.loads raises ValueError
		# (JSONDecodeError); the bare form also swallowed KeyboardInterrupt.
		print('[E] JSON syntax error in {}'.format('{}/applications/{}.json'.format(args['profiles-path'], target)))
		traceback.print_exc()
		exit(1)

	return instructions
-
def get_local_instructions(target):
	"""Load ./deployments/<target>.json from disk.

	Returns the file's raw text when found, otherwise an empty oDict.
	"""
	instructions = oDict()
	local_path = './deployments' if os.path.isfile('./archinstall.py') else './archinstall/deployments' # Dangerous assumption
	profile_file = f'{local_path}/{target}.json'
	if os.path.isfile(profile_file):
		with open(profile_file, 'r') as fh:
			instructions = fh.read()
		print('[N] Found local instructions called: {}'.format(target))
	else:
		print('[N] No instructions found called: {}'.format(target))
	return instructions
-
def get_instructions(target, *positionals, **kwargs):
	"""Fetch deployment instructions for *target*: remote profile store first, then local.

	Accepts *target* with or without a '.json' suffix. Exits the process on
	JSON syntax errors; returns a dict/oDict.
	"""
	if 'profiles-path' not in kwargs: kwargs['profiles-path'] = args['profiles-path']
	instructions = oDict()
	# Idiom fix: endswith() instead of the negative-slice comparison.
	if target.endswith('.json'): target = target[:-len('.json')]
	log(f'Fetching instructions for {target}', level=4, origin='get_instructions')
	if get_default_gateway_linux():
		try:
			instructions = grab_url_data(f"{kwargs['profiles-path']}/{target}.json").decode('UTF-8')
			log(f'Found net-deploy instructions for {target}', level=4, origin='get_instructions')
			print('[N] Found net-deploy instructions called: {}'.format(target))
		except urllib.error.HTTPError:
			print('[N] Could not find remote instructions. Trying local instructions under ./deployments')
			log(f'Could not find remote instructions. Trying local instructions under ./deployments', level=4, origin='get_instructions')
			instructions = get_local_instructions(target, *positionals)
	else:
		instructions = get_local_instructions(target, *positionals)

	if type(instructions) not in (dict, oDict,):
		try:
			instructions = json.loads(instructions, object_pairs_hook=oDict)
		except ValueError:
			# Fix: narrowed the bare `except:` to the ValueError json.loads raises.
			log(f'JSON syntax error in: {target}', level=4, origin='get_instructions')
			print('[E] JSON syntax error in {}'.format('{}/{}.json'.format(kwargs['profiles-path'], target)))
			traceback.print_exc()
			exit(1)

	log(f'Final instructions are: {instructions}', level=4, origin='get_instructions')
	return instructions
-
def merge_dicts(d1, d2, before=True, overwrite=False):
	"""Merge *d2* into *d1* (recursing into nested dicts) and return the result.

	With before=True (the default) the two dicts are swapped first, so the
	original d1's values take precedence. NOTE(review): for non-dict values
	the *overwrite* flag made no difference in the original (both branches
	assigned the value) - that behavior is preserved here.
	"""
	if before:
		d1, d2 = d2.copy(), d1.copy()
		overwrite = True

	for key, val in d2.items():
		if key in d1 and type(d1[key]) in [dict, oDict] and type(d2[key]) in [dict, oDict]:
			d1[key] = merge_dicts(d1[key] if not before else d2[key], d2[key] if not before else d1[key], before=before, overwrite=overwrite)
		else:
			d1[key] = val

	return d1
-
def random_string(l):
	"""Return a random alphanumeric string of length *l*."""
	alphabet = ascii_uppercase + ascii_lowercase + digits
	return ''.join(choice(alphabet) for _ in range(l))
-
def phone_home(url):
	"""POST a JSON status report about this installation to *url*."""
	report = {
		"hostname": args['hostname'],
		"done": time.time(),
		"profile": args['profile'],
		"drive": args['drive'],
		"base_status": base_return_code,
	}
	request = urllib.request.Request(url, data=json.dumps(report).encode('utf8'), headers={'content-type': 'application/json'})
	urllib.request.urlopen(request)
-
def get_external_ip(*positionals, **kwargs):
	"""Return this machine's public IP as reported by hvornum.se."""
	raw = urllib.request.urlopen("https://hvornum.se/ip/?f=json").read()
	return json.loads(raw.decode('UTF-8'))['ip']
-
def guess_country(ip, *positionals, **kwargs):
	"""Best-effort country-code lookup for *ip* via the local GeoIP database.

	Returns a country code string, or None when the database or the pygeoip
	module is unavailable.
	"""
	# python-pygeoip
	# geoip-database
	result = None
	GEOIP_DB = '/usr/share/GeoIP/GeoIP.dat'
	if os.path.isfile(GEOIP_DB):
		try:
			import pygeoip
		except ImportError:
			# Fix: narrowed the bare `except:` - only a missing module should
			# trigger the silent fallback.
			## TODO: Do a best-effort-guess based off the hostname given off the IP instead, if GoeIP doesn't exist.
			return result

		gi = pygeoip.GeoIP(GEOIP_DB)
		result = gi.country_code_by_addr(ip)
	else:
		log(f'Missing GeoIP database: {GEOIP_DB}', origin='guess_country', level=LOG_LEVELS.ERROR)
	return result
-
def setup_args_defaults(args, *positionals, **kwargs):
	"""Fill in a default value for every install option not already present in *args*.

	Mutates and returns the same dict. The country (and from it, localtime)
	is auto-guessed from the external IP when a default gateway exists.
	"""
	defaults = {
		'size': '100%',
		'mirrors': True,
		'start': '513MiB',
		'pwfile': '/tmp/diskpw',
		'hostname': 'Archinstall',
		'packages': '',  # extra packages other than default
		'post': 'reboot',
		'password': '0000',  # Default disk passord, can be <STDIN> or a fixed string
		'minimal': False,
		'unattended': False,
		'profile': None,
		'skip-encrypt': False,
		'rerun': None,
		'aur-keep': False,
		'aur-support': True,  # Support adds yay (https://github.com/Jguer/yay) in installation steps.
		'ignore-rerun': False,
		'phone-home': False,
	}
	for key, val in defaults.items():
		args.setdefault(key, val)
	# Kept separate so the global `profiles_path` is only touched when needed.
	if 'profiles-path' not in args:
		args['profiles-path'] = profiles_path

	# Setup locales if we didn't get one.
	if 'country' not in args:
		country = None
		if get_default_gateway_linux():
			country = guess_country(get_external_ip())
		args['country'] = 'all' if not country else country
	args.setdefault('localtime', 'Europe/Stockholm' if args['country'] == 'SE' else 'GMT+0')  # TODO: Arbitrary for now

	return args
-
def load_automatic_instructions(*positionals, **kwargs):
	"""Fetch net-deploy instructions for this machine keyed on its MAC address(es).

	Requires a default gateway; returns an empty oDict otherwise.
	Side effect: merges any {'args': ...} section of the fetched instructions
	(plus kwargs['user_args']) into the global ``args`` dict.
	"""
	instructions = oDict()
	if get_default_gateway_linux(*positionals, **kwargs):
		locmac = get_local_MACs()
		if not len(locmac):
			print('[N] No network interfaces - No net deploy.')
		else:
			# NOTE(review): each MAC overwrites `instructions`, so effectively
			# only the last MAC with a profile wins - confirm that is intended.
			for mac in locmac:
				instructions = get_instructions(mac, *positionals, **kwargs)

				if 'args' in instructions:
					## == Recursively fetch instructions if "include" is found under {args: ...}
					# 'include' is deleted before merging so the loop terminates
					# once all nested includes have been pulled in.
					while 'include' in instructions['args']:
						includes = instructions['args']['include']
						print('[!] Importing net-deploy target: {}'.format(includes))
						del(instructions['args']['include'])
						if type(includes) in (dict, list):
							for include in includes:
								instructions = merge_dicts(instructions, get_instructions(include, *positionals, **kwargs), before=True)
						else:
							instructions = merge_dicts(instructions, get_instructions(includes, *positionals, **kwargs), before=True)

					## Update arguments if we found any
					for key, val in instructions['args'].items():
						args[key] = val
					# Caller-supplied overrides win over profile-provided args.
					if 'user_args' in kwargs:
						for key, val in kwargs['user_args'].items():
							args[key] = val
	else:
		print('[N] No gateway - No net deploy')

	return instructions
-
def cache_diskpw_on_disk():
	"""Write the disk password to args['pwfile'] unless that file already exists."""
	if os.path.isfile(args['pwfile']):
		return
	with open(args['pwfile'], 'w') as pw_file:
		pw_file.write(args['password'])
-
def refresh_partition_list(drive, *positionals, **kwargs):
	"""Re-scan partitions of args[drive] and cache them as args['partitions'] (1-based string keys)."""
	drive = args[drive]
	args.setdefault('partitions', oDict())
	part_names = sorted(get_partitions(drive, *positionals, **kwargs).keys())
	for index, part_name in enumerate(part_names, start=1):
		args['partitions'][str(index)] = part_name
	return True
-
def mkfs_fat32(drive, partition, *positionals, **kwargs):
	"""Format args[drive] + the named partition as FAT32; True on success, None on failure."""
	device = args[drive] + args['partitions'][partition]
	output = b''.join(sys_command(f'/usr/bin/mkfs.vfat -F32 {device}'))
	failed = (b'mkfs.fat' not in output and b'mkfs.vfat' not in output) or b'command not found' in output
	return None if failed else True
-
def is_luksdev_mounted(*positionals, **kwargs):
	"""Check whether /dev/mapper/luksdev exists (i.e. the LUKS mapping is open)."""
	output = b''.join(sys_command('/usr/bin/file /dev/mapper/luksdev', hide_from_log=True)) # /dev/dm-0
	return b'cannot open' not in output
-
def mount_luktsdev(drive, partition, keyfile, *positionals, **kwargs):
	"""Open the LUKS2 container on args[drive]+partition as 'luksdev' using args[keyfile].

	(Function name typo kept for backwards compatibility with callers.)
	Returns the result of is_luksdev_mounted() afterwards.
	"""
	drive = args[drive]
	partition = args['partitions'][partition]
	keyfile = args[keyfile]
	if not is_luksdev_mounted():
		# Fix: dropped the trailing `.format(**args)` - the f-string already
		# interpolates everything, so it was a no-op at best and would raise
		# KeyError if any interpolated value contained literal braces.
		o = b''.join(sys_command(f'/usr/bin/cryptsetup open {drive}{partition} luksdev --key-file {keyfile} --type luks2'))
	return is_luksdev_mounted()
-
def encrypt_partition(drive, partition, keyfile='/tmp/diskpw', *positionals, **kwargs):
	"""luksFormat args[drive]+partition as LUKS2 using the key file named by args[keyfile]."""
	device = args[drive] + args['partitions'][partition]
	keyfile = args[keyfile]
	output = b''.join(sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash sha512 --key-size 512 --iter-time 10000 --key-file {keyfile} --use-urandom luksFormat {device}'))
	return b'Command successful.' in output
-
def mkfs_btrfs(drive='/dev/mapper/luksdev', *positionals, **kwargs):
	"""Create a btrfs filesystem on *drive*; True if mkfs reported a UUID."""
	output = b''.join(sys_command(f'/usr/bin/mkfs.btrfs -f {drive}'))
	return b'UUID' in output
-
def mount_luksdev(where='/dev/mapper/luksdev', to='/mnt', *positionals, **kwargs):
	"""Mount the opened LUKS device *where* onto *to*.

	Returns False when *to* already shows up in the mount table, True otherwise.
	"""
	check_mounted = simple_command(f'/usr/bin/mount | /usr/bin/grep {to}', *positionals, **kwargs).decode('UTF-8').strip()# /dev/dm-0
	if len(check_mounted):
		return False

	# Fix: honor the *where*/*to* parameters instead of the previously
	# hard-coded '/dev/mapper/luksdev /mnt' (the defaults keep old behavior).
	o = b''.join(sys_command(f'/usr/bin/mount {where} {to}', *positionals, **kwargs))
	return True
-
def mount_part(drive, partition, mountpoint='/mnt', *positionals, **kwargs):
	"""Mount {drive}{partition} at *mountpoint*; False if something is already mounted there."""
	os.makedirs(mountpoint, exist_ok=True)

	already_mounted = simple_command(f'/usr/bin/mount | /usr/bin/grep {mountpoint}', *positionals, **kwargs).decode('UTF-8').strip()
	if already_mounted:
		return False

	b''.join(sys_command(f'/usr/bin/mount {drive}{partition} {mountpoint}', *positionals, **kwargs))
	return True
-
def mount_boot(drive, partition, mountpoint='/mnt/boot', *positionals, **kwargs):
	"""Mount the boot partition {drive}{partition} at *mountpoint*; False if already mounted."""
	# Fix: '/mnt/boot' was previously hard-coded in both makedirs and the
	# mount-table grep even when a different mountpoint= was passed
	# (the default keeps the old behavior for existing callers).
	os.makedirs(mountpoint, exist_ok=True)

	check_mounted = simple_command(f'/usr/bin/mount | /usr/bin/grep {mountpoint}', *positionals, **kwargs).decode('UTF-8').strip()
	if len(check_mounted):
		return False

	o = b''.join(sys_command(f'/usr/bin/mount {drive}{partition} {mountpoint}', *positionals, **kwargs))
	return True
-
def mount_mountpoints(drive, bootpartition, mountpoint='/mnt', *positionals, **kwargs):
	"""Mount the system root (plain or via LUKS) at *mountpoint*, then the boot partition under it."""
	drive = args[drive]
	if args['skip-encrypt']:
		# Unencrypted install: mount partition "2" directly.
		mount_part(drive, args['partitions']["2"], mountpoint, *positionals, **kwargs)
	else:
		mount_luksdev(*positionals, **kwargs)
	mount_boot(drive, args['partitions'][bootpartition], mountpoint=f'{mountpoint}/boot', *positionals, **kwargs)
	return True
-
def re_rank_mirrors(top=10, *positionals, **kwargs):
	"""Re-rank the pacman mirrorlist, keeping the *top* fastest entries."""
	cmd = sys_command(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist')
	if cmd.exit_code == 0:
		return True
	log(f"Could not re-rank mirrors: {cmd.trace_log}", level=3, origin='re_rank_mirrors')
	return False
-
def filter_mirrors_by_country_list(countries, top=None, *positionals, **kwargs):
	"""Replace /etc/pacman.d/mirrorlist with https mirrors from the given comma-separated countries.

	Optionally re-ranks the result, keeping the *top* fastest mirrors.
	Always returns True.
	"""
	## TODO: replace wget with urllib.request (no point in calling syscommand)
	country_list = [f'country={country}' for country in countries.split(',')]

	if not SAFETY_LOCK:
		o = b''.join(sys_command((f"/usr/bin/wget 'https://www.archlinux.org/mirrorlist/?{'&'.join(country_list)}&protocol=https&ip_version=4&ip_version=6&use_mirror_status=on' -O /root/mirrorlist")))
		o = b''.join(sys_command(("/usr/bin/sed -i 's/#Server/Server/' /root/mirrorlist")))
		o = b''.join(sys_command(("/usr/bin/mv /root/mirrorlist /etc/pacman.d/")))

	if top:
		# Fix: the original tacked on "or not os.path.isfile(...)" and
		# discarded the whole expression - dead code with no effect, removed.
		re_rank_mirrors(top, *positionals, **kwargs)

	return True
-
def add_custom_mirror(name, url, *positionals, **kwargs):
	"""Append a custom [name] repository pointing at *url* to /etc/pacman.conf."""
	if not SAFETY_LOCK:
		commandlog.append('# Adding custom mirror to /etc/pacman.conf')
		commandlog.append(f'# {name} @ {url}')
		with open('/etc/pacman.conf', 'a') as pacman_conf:
			pacman_conf.write('\n')
			pacman_conf.write(f'[{name}]\n')
			pacman_conf.write(f'Server = {url}\n')
			pacman_conf.write('SigLevel = Optional TrustAll\n')
		return True
-
def add_specific_mirrors(mirrors, *positionals, **kwargs):
	"""Append the given {url: comment} mirrors to /etc/pacman.d/mirrorlist."""
	if not SAFETY_LOCK:
		commandlog.append('# Adding mirrors to /etc/pacman.d/mirrorlist')
		with open('/etc/pacman.d/mirrorlist', 'a') as mirrorlist:
			mirrorlist.write('\n')
			for url, comment in mirrors.items():
				commandlog.append(f'# {url}')
				mirrorlist.write(f'# {comment}\n')
				mirrorlist.write(f'Server = {url}\n')
		return True
-
def flush_all_mirrors(*positionals, **kwargs):
	"""Truncate /etc/pacman.d/mirrorlist, leaving only a blank line."""
	if not SAFETY_LOCK:
		commandlog.append('# Flushed /etc/pacman.d/mirrorlist')
		with open('/etc/pacman.d/mirrorlist', 'w') as mirrorlist:
			mirrorlist.write('\n') # TODO: Not needed.
		return True
-
def reboot(*positionals, **kwargs):
	"""Sync filesystems to disk, then reboot the machine."""
	for command in ('/usr/bin/sync', '/usr/bin/reboot'):
		simple_command(command, *positionals, **kwargs).decode('UTF-8').strip()
-
def strap_in_base(*positionals, **kwargs):
	"""Sync pacman mirrors and pacstrap the base system (plus extras) into /mnt.

	Returns True on success, False under SAFETY_LOCK or on any failure.
	"""
	if SAFETY_LOCK:
		return False
	if args['aur-support']:
		# git is needed later to clone yay from the AUR.
		args['packages'] += ' git'
	sync_mirrors = sys_command('/usr/bin/pacman -Syy', *positionals, **kwargs)
	if sync_mirrors.exit_code != 0:
		log(f'Could not sync mirrors: {sync_mirrors.exit_code}', level=3, origin='strap_in_base')
		return False
	x = sys_command('/usr/bin/pacstrap /mnt base base-devel linux linux-firmware btrfs-progs efibootmgr nano wpa_supplicant dialog {packages}'.format(**args), *positionals, **kwargs)
	if x.exit_code == 0:
		return True
	log(f'Could not strap in base: {x.exit_code}', level=3, origin='strap_in_base')
	return False
-
def set_locale(fmt, *positionals, **kwargs):
	"""Generate and activate the given locale inside the chroot.

	A bare 'se' is expanded to en_SE.UTF-8; anything else without a '.' falls
	back to en_US.UTF-8. Always returns True.
	"""
	if '.' not in fmt:
		fmt = 'en_SE.UTF-8 UTF-8' if fmt.lower() == 'se' else 'en_US.UTF-8 UTF-8'

	if not SAFETY_LOCK:
		o = b''.join(sys_command(f"/usr/bin/arch-chroot /mnt sh -c \"echo '{fmt}' > /etc/locale.gen\""))
		o = b''.join(sys_command(f"/usr/bin/arch-chroot /mnt sh -c \"echo 'LANG={fmt.split(' ')[0]}' > /etc/locale.conf\""))
		o = b''.join(sys_command('/usr/bin/arch-chroot /mnt locale-gen'))

	return True
-
def configure_base_system(*positionals, **kwargs):
	"""Post-pacstrap base configuration: fstab, timezone, hostname, mkinitcpio.

	Returns False when /mnt/etc/fstab is missing (pacstrap most likely
	failed), True otherwise. No-op (but still True) under SAFETY_LOCK.
	"""
	if not SAFETY_LOCK:
		## TODO: Replace a lot of these syscalls with just python native operations.
		# NOTE(review): relies on sys_command handling the '>>' shell
		# redirection for genfstab - confirm, otherwise fstab stays empty.
		o = b''.join(sys_command('/usr/bin/genfstab -pU /mnt >> /mnt/etc/fstab'))
		if not os.path.isfile('/mnt/etc/fstab'):
			log(f'Could not locate fstab, strapping in packages most likely failed.', level=3, origin='configure_base_system')
			return False

		# Give the new system a tmpfs /tmp.
		with open('/mnt/etc/fstab', 'a') as fstab:
			fstab.write('\ntmpfs /tmp tmpfs defaults,noatime,mode=1777 0 0\n') # Redundant \n at the start? who knoes?

		# Timezone: replace /etc/localtime with args['localtime'] and sync the hardware clock.
		o = b''.join(sys_command('/usr/bin/arch-chroot /mnt rm -f /etc/localtime'))
		o = b''.join(sys_command('/usr/bin/arch-chroot /mnt ln -s /usr/share/zoneinfo/{localtime} /etc/localtime'.format(**args)))
		o = b''.join(sys_command('/usr/bin/arch-chroot /mnt hwclock --hctosys --localtime'))
		#o = sys_command('arch-chroot /mnt echo "{hostname}" > /etc/hostname'.format(**args))
		#o = sys_command("arch-chroot /mnt sed -i 's/#\(en_US\.UTF-8\)/\1/' /etc/locale.gen")

		o = b''.join(sys_command("/usr/bin/arch-chroot /mnt sh -c \"echo '{hostname}' > /etc/hostname\"".format(**args)))
		o = b''.join(sys_command('/usr/bin/arch-chroot /mnt chmod 700 /root'))

		# Regenerate the initramfs with btrfs + encrypt hooks baked in.
		with open('/mnt/etc/mkinitcpio.conf', 'w') as mkinit:
			## TODO: Don't replace it, in case some update in the future actually adds something.
			mkinit.write('MODULES=(btrfs)\n')
			mkinit.write('BINARIES=(/usr/bin/btrfs)\n')
			mkinit.write('FILES=()\n')
			mkinit.write('HOOKS=(base udev autodetect modconf block encrypt filesystems keyboard fsck)\n')
		o = b''.join(sys_command('/usr/bin/arch-chroot /mnt mkinitcpio -p linux'))

	return True
-
def setup_bootloader(*positionals, **kwargs):
	"""Install systemd-boot into /mnt/boot and write the 'arch' loader entry.

	Uses root=PARTUUID=... for unencrypted installs (args['skip-encrypt']),
	or a cryptdevice=UUID=...:luksdev kernel argument for the default LUKS
	layout. Always returns True; individual commands are not checked.
	"""
	o = b''.join(sys_command('/usr/bin/arch-chroot /mnt bootctl --no-variables --path=/boot install'))

	with open('/mnt/boot/loader/loader.conf', 'w') as loader:
		loader.write('default arch\n')
		loader.write('timeout 5\n')

	## For some reason, blkid and /dev/disk/by-uuid are not getting along well.
	## And blkid is wrong in terms of LUKS.
	#UUID = sys_command('blkid -s PARTUUID -o value {drive}{partition_2}'.format(**args)).decode('UTF-8').strip()
	with open('/mnt/boot/loader/entries/arch.conf', 'w') as entry:
		entry.write('title Arch Linux\n')
		entry.write('linux /vmlinuz-linux\n')
		entry.write('initrd /initramfs-linux.img\n')
		if args['skip-encrypt']:
			## NOTE: We could use /dev/disk/by-partuuid but blkid does the same and a lot cleaner
			UUID = simple_command(f"blkid -s PARTUUID -o value /dev/{os.path.basename(args['drive'])}{args['partitions']['2']}").decode('UTF-8').strip()
			entry.write('options root=PARTUUID={UUID} rw intel_pstate=no_hwp\n'.format(UUID=UUID))
		else:
			# NOTE(review): recovers the LUKS partition's UUID by grepping
			# `ls -l /dev/disk/by-uuid` for the device name - fragile; confirm
			# it cannot match more than the intended partition.
			UUID = simple_command(f"ls -l /dev/disk/by-uuid/ | grep {os.path.basename(args['drive'])}{args['partitions']['2']} | awk '{{print $9}}'").decode('UTF-8').strip()
			entry.write('options cryptdevice=UUID={UUID}:luksdev root=/dev/mapper/luksdev rw intel_pstate=no_hwp\n'.format(UUID=UUID))

	return True
-
def add_AUR_support(*positionals, **kwargs):
	"""Install yay (AUR helper) by building it as a temporary 'aibuilder' user inside the chroot.

	Always returns True; individual commands are not checked for success.
	"""
	chroot_steps = (
		'/usr/bin/arch-chroot /mnt sh -c "useradd -m -G wheel aibuilder"',
		"/usr/bin/sed -i 's/# %wheel ALL=(ALL) NO/%wheel ALL=(ALL) NO/' /mnt/etc/sudoers",
		'/usr/bin/arch-chroot /mnt sh -c "su - aibuilder -c \\"(cd /home/aibuilder; git clone https://aur.archlinux.org/yay.git)\\""',
		'/usr/bin/arch-chroot /mnt sh -c "chown -R aibuilder.aibuilder /home/aibuilder/yay"',
		'/usr/bin/arch-chroot /mnt sh -c "su - aibuilder -c \\"(cd /home/aibuilder/yay; makepkg -si --noconfirm)\\" >/dev/null"',
	)
	for step in chroot_steps:
		b''.join(sys_command(step))
	## Do not remove aibuilder just yet, can be used later for aur packages.
	return True
-
-def run_post_install_steps(*positionals, **kwargs):
- log(f'Running post installation with input data {instructions}.', level=4, origin='run_post_install_steps')
- conf = {}
- if 'post' in instructions:
- conf = instructions['post']
- elif not 'args' in instructions and len(instructions):
- conf = instructions
-
- if 'git-branch' in conf:
- update_git(conf['git-branch'])
- del(conf['git-branch'])
-
- rerun = args['ignore-rerun']
- for title in conf:
- log(f'Running post installation step {title}', level=4, origin='run_post_install_steps')
- if args['rerun'] and args['rerun'] != title and not rerun:
- continue
- else:
- rerun = True
-
- print('[N] Network Deploy: {}'.format(title))
- if type(conf[title]) == str:
- print('[N] Loading {} configuration'.format(conf[title]))
- log(f'Loading {conf[title]} configuration', level=4, origin='run_post_install_steps')
- conf[title] = get_application_instructions(conf[title])
-
- for command in conf[title]:
- raw_command = command
- opts = conf[title][command] if type(conf[title][command]) in (dict, oDict) else {}
- if len(opts):
- if 'pass-args' in opts or 'format' in opts:
- command = command.format(**args)
- ## FIXME: Instead of deleting the two options
- ## in order to mute command output further down,
- ## check for a 'debug' flag per command and delete these two
- if 'pass-args' in opts:
- del(opts['pass-args'])
- elif 'format' in opts:
- del(opts['format'])
- elif ('debug' in opts and opts['debug']) or ('debug' in conf and conf['debug']):
- print('[-] Options: {}'.format(opts))
- if 'pass-args' in opts and opts['pass-args']:
- command = command.format(**args)
-
- if 'runas' in opts and f'su - {opts["runas"]} -c' not in command:
- command = command.replace('"', '\\"')
- command = f'su - {opts["runas"]} -c "{command}"'
-
- #print('[N] Command: {} ({})'.format(command, opts))
-
- ## https://superuser.com/questions/1242978/start-systemd-nspawn-and-execute-commands-inside
- ## !IMPORTANT
- ##
- ## arch-chroot mounts /run into the chroot environment, this breaks name resolves for some reason.
- ## Either skipping mounting /run and using traditional chroot is an option, but using
- ## `systemd-nspawn -D /mnt --machine temporary` might be a more flexible solution in case of file structure changes.
- if 'no-chroot' in opts and opts['no-chroot']:
- log(f'Executing {command} as simple command from live-cd.', level=4, origin='run_post_install_steps')
- o = simple_command(command, opts, *positionals, **kwargs)
- elif 'chroot' in opts and opts['chroot']:
- log(f'Executing {command} in chroot.', level=4, origin='run_post_install_steps')
- ## Run in a manually set up version of arch-chroot (arch-chroot will break namespaces).
- ## This is a bit risky in case the file systems changes over the years, but we'll probably be safe adding this as an option.
- ## **> Prefer if possible to use 'no-chroot' instead which "live boots" the OS and runs the command.
- o = simple_command("mount /dev/mapper/luksdev /mnt")
- o = simple_command("cd /mnt; cp /etc/resolv.conf etc")
- o = simple_command("cd /mnt; mount -t proc /proc proc")
- o = simple_command("cd /mnt; mount --make-rslave --rbind /sys sys")
- o = simple_command("cd /mnt; mount --make-rslave --rbind /dev dev")
- o = simple_command('chroot /mnt /bin/bash -c "{c}"'.format(c=command), opts=opts, *positionals, **kwargs)
- o = simple_command("cd /mnt; umount -R dev")
- o = simple_command("cd /mnt; umount -R sys")
- o = simple_command("cd /mnt; umount -R proc")
- else:
- if 'boot' in opts and opts['boot']:
- log(f'Executing {command} in boot mode.', level=4, origin='run_post_install_steps')
- ## So, if we're going to boot this maddafakker up, we'll need to
- ## be able to login. The quickest way is to just add automatic login.. so lessgo!
-
- ## Turns out.. that didn't work exactly as planned..
- ##
- # if not os.path.isdir('/mnt/etc/systemd/system/console-getty.service.d/'):
- # os.makedirs('/mnt/etc/systemd/system/console-getty.service.d/')
- # with open('/mnt/etc/systemd/system/console-getty.service.d/override.conf', 'w') as fh:
- # fh.write('[Service]\n')
- # fh.write('ExecStart=\n')
- # fh.write('ExecStart=-/usr/bin/agetty --autologin root -s %I 115200,38400,9600 vt102\n')
-
- ## So we'll add a bunch of triggers instead and let the sys_command manually react to them.
- ## "<hostname> login" followed by "Passwodd" in case it's been set in a previous step.. usually this shouldn't be nessecary
- ## since we set the password as the last step. And then the command itself which will be executed by looking for:
- ## [root@<hostname> ~]#
- defaults = {
- 'login:' : 'root\n',
- 'Password:' : args['password']+'\n',
- '[root@{args["hostname"]} ~]#' : command+'\n',
- }
- if not 'events' in opts: opts['events'] = {}
- events = {**defaults, **opts['events']}
- del(opts['events'])
- o = b''.join(sys_command('/usr/bin/systemd-nspawn -D /mnt -b --machine temporary', *positionals, **{'events' : events, **kwargs, **opts}))
-
- ## Not needed anymore: And cleanup after out selves.. Don't want to leave any residue..
- # os.remove('/mnt/etc/systemd/system/console-getty.service.d/override.conf')
- else:
- log(f'Executing {command} in with systemd-nspawn without boot.', level=4, origin='run_post_install_steps')
- o = b''.join(sys_command(f'/usr/bin/systemd-nspawn -D /mnt --machine temporary {command}', *positionals, **{**kwargs, **opts}))
- if type(conf[title][raw_command]) == bytes and len(conf[title][raw_command]) and not conf[title][raw_command] in o:
- log(f'{command} failed: {o.decode("UTF-8")}', level=4, origin='run_post_install_steps')
- print('[W] Post install command failed: {}'.format(o.decode('UTF-8')))
- #print(o)
-
- print('run_post_install_steps() is complete.')
- return True
-
-def create_user(username, password='', groups=[]):
- if username:
- o = (f'/usr/bin/arch-chroot /mnt useradd -m -G wheel {username}')
- if password:
- o = (f"/usr/bin/arch-chroot /mnt sh -c \"echo '{username}:{password}' | chpasswd\"")
- if groups:
- for group in groups:
- o = (f'/usr/bin/arch-chroot /mnt gpasswd -a {username} {group}')
- return True
-
-def prerequisit_check():
- if not os.path.isdir('/sys/firmware/efi'):
- return False, 'Archinstall only supports UEFI-booted machines.'
-
- return True
-
-if __name__ == '__main__':
-
- if not (prereq := prerequisit_check()) is True:
- print(f'[E] {prereq[1]}')
- exit(1)
-
- ## Setup some defaults
- # (in case no command-line parameters or netdeploy-params were given)
- args = setup_args_defaults(args)
- user_args = {}
- positionals = []
- for arg in sys.argv[1:]:
- if '--' == arg[:2]:
- if '=' in arg:
- key, val = [x.strip() for x in arg[2:].split('=')]
- else:
- key, val = arg[2:], True
- args[key] = val
- user_args[key] = val
- else:
- positionals.append(arg)
-
- update_git() # Breaks and restarts the script if an update was found.
- update_drive_list()
-
- ## == If we got networking,
- # Try fetching instructions for this box unless a specific profile was given, and execute them.
- if args['profile'] is None and not args['minimal']:
- instructions = load_automatic_instructions(user_args=user_args)
-
- elif args['profile'] and not args['minimal']:
- instructions = get_instructions(args['profile'])
- if len(instructions) <= 0:
- print('[E] No instructions by the name of {} was found.'.format(args['profile']))
- print(' Installation won\'t continue until a valid profile is given.')
- print(' (this is because --profile was given and a --default is not given)')
- exit(1)
-
- first = True
- while not args['minimal'] and not args['profile'] and len(instructions) <= 0:
- profile = input('What template do you want to install: ')
- instructions = get_instructions(profile)
- if first and len(instructions) <= 0:
- print('[E] No instructions by the name of {} was found.'.format(profile))
- print(' Installation won\'t continue until a valid profile is given.')
- print(' (this is because --default is not instructed and no --profile given)')
- first = False
-
- # TODO: Might not need to return anything here, passed by reference?
- instructions = merge_in_includes(instructions, user_args=user_args)
- cleanup_args()
-
- ## If no drive was found in args, select one.
- if not 'drive' in args:
- if len(harddrives):
- drives = sorted(list(harddrives.keys()))
- if len(drives) > 1 and 'force' not in args and not 'unattended' in args and ('minimal' in args and 'first-drive' not in args):
- for index, drive in enumerate(drives):
- print(f"{index}: {drive} ({harddrives[drive]['size'], harddrives[drive]['fstype'], harddrives[drive]['label']})")
- drive = input('Select one of the above disks (by number): ')
- if not drive.isdigit():
- raise KeyError("Multiple disks found, --drive=/dev/X not specified (or --force/--first-drive)")
- drives = [drives[int(drive)]] # Make sure only the selected drive is in the list of options
- args['drive'] = drives[0] # First drive found
- else:
- args['drive'] = None
-
- if args['drive'] and args['drive'][0] != '/':
- ## Remap the selected UUID to the device to be formatted.
- drive = get_drive_from_uuid(args['drive'])
- if not drive:
- print(f'[N] Could not map UUID "{args["drive"]}" to a device. Trying to match via PARTUUID instead!')
-
- drive = get_drive_from_part_uuid(args['drive'])
- if not drive:
- print(f'[E] Could not map UUID "{args["drive"]}" to a device. Aborting!')
- exit(1)
-
- args['drive'] = drive
-
- print(json.dumps(args, indent=4))
- if args['minimal'] and not 'force' in args and not 'unattended' in args:
- if(input('Are these settings OK? (No return beyond this point) N/y: ').lower() != 'y'):
- exit(1)
-
- cache_diskpw_on_disk()
- #else:
- # ## TODO: Convert to `rb` instead.
- # # We shouldn't discriminate \xfu from being a passwd phrase.
- # with open(args['pwfile'], 'r') as pw:
- # PIN = pw.read().strip()
-
- print()
- if not args['skip-encrypt']:
- print('[!] Disk & root PASSWORD is: {}'.format(args['password']))
- else:
- print('[!] root PASSWORD is: {}'.format(args['password']))
- print()
-
- if not args['rerun'] or args['ignore-rerun']:
- for i in range(5, 0, -1):
- print(f'Formatting {args["drive"]} in {i}...')
- time.sleep(1)
-
- close_disks()
- print(f'[N] Setting up {args["drive"]}.')
- if not format_disk('drive', start='start', end='size', debug=True):
- print(f'[E] Coult not format drive {args["drive"]}')
- exit(1)
-
- refresh_partition_list('drive')
- print(f'[N] Partitions: {len(args["partitions"])} (Boot: {list(args["partitions"].keys())[0]})')
-
- if len(args['partitions']) <= 0:
- print(f'[E] No partitions were created on {args["drive"]}', o)
- exit(1)
-
- if not args['rerun'] or args['ignore-rerun']:
- if not mkfs_fat32('drive', '1'):
- print(f'[E] Could not setup {args["drive"]}{args["partitions"]["1"]}')
- exit(1)
-
- if not args['skip-encrypt']:
- # "--cipher sha512" breaks the shit.
- # TODO: --use-random instead of --use-urandom
- print(f'[N] Adding encryption to {args["drive"]}{args["partitions"]["2"]}.')
- if not encrypt_partition('drive', '2', 'pwfile'):
- print('[E] Failed to setup disk encryption.', o)
- exit(1)
-
- if not args['skip-encrypt']:
- if not mount_luktsdev('drive', '2', 'pwfile'):
- print('[E] Could not open encrypted device.', o)
- exit(1)
-
- if not args['rerun'] or args['ignore-rerun']:
- print(f'[N] Creating btrfs filesystem inside {args["drive"]}{args["partitions"]["2"]}')
-
- on_part = '/dev/mapper/luksdev'
- if args['skip-encrypt']:
- on_part = f'{args["drive"]}{args["partitions"]["2"]}'
- if not mkfs_btrfs(on_part):
- print('[E] Could not setup btrfs filesystem.')
- exit(1)
-
- mount_mountpoints('drive', '1')
-
- if 'mirrors' in args and args['mirrors'] and 'country' in args and get_default_gateway_linux():
- print('[N] Reordering mirrors.')
- filter_mirrors_by_country_list(args['country'])
-
- pre_conf = {}
- if 'pre' in instructions:
- pre_conf = instructions['pre']
- elif 'prerequisits' in instructions:
- pre_conf = instructions['prerequisits']
-
- if 'git-branch' in pre_conf:
- update_git(pre_conf['git-branch'])
- del(pre_conf['git-branch'])
-
- rerun = args['ignore-rerun']
-
- ## Prerequisit steps needs to NOT be executed in arch-chroot.
- ## Mainly because there's no root structure to chroot into.
- ## But partly because some configurations need to be done against the live CD.
- ## (For instance, modifying mirrors are done on LiveCD and replicated intwards)
- for title in pre_conf:
- print('[N] Network prerequisit step: {}'.format(title))
- if args['rerun'] and args['rerun'] != title and not rerun:
- continue
- else:
- rerun = True
-
- for command in pre_conf[title]:
- raw_command = command
- opts = pre_conf[title][raw_command] if type(pre_conf[title][raw_command]) in (dict, oDict) else {}
- if len(opts):
- if 'pass-args' in opts or 'format' in opts:
- command = command.format(**args)
- ## FIXME: Instead of deleting the two options
- ## in order to mute command output further down,
- ## check for a 'debug' flag per command and delete these two
- if 'pass-args' in opts:
- del(opts['pass-args'])
- elif 'format' in opts:
- del(opts['format'])
- elif 'debug' in opts and opts['debug']:
- print('[N] Complete command-string: '.format(command))
- else:
- print('[-] Options: {}'.format(opts))
-
- #print('[N] Command: {} ({})'.format(raw_command, opts))
- o = b''.join(sys_command('{c}'.format(c=command), opts))
- if type(conf[title][raw_command]) == bytes and len(conf[title][raw_command]) and not conf[title][raw_command] in b''.join(o):
- print('[W] Prerequisit step failed: {}'.format(b''.join(o).decode('UTF-8')))
- #print(o)
-
- if not args['rerun'] or rerun:
- print('[N] Straping in packages.')
- base_return_code = strap_in_base() # TODO: check return here? we return based off pacstrap exit code.. Never tired it tho.
- else:
- base_return_code = None
-
- if not os.path.isdir('/mnt/etc'): # TODO: This might not be the most long term stable thing to rely on...
- print('[E] Failed to strap in packages', o)
- exit(1)
-
- if not args['rerun'] or rerun:
- print('[N] Configuring base system.')
- set_locale('en_US.UTF-8 UTF-8')
- configure_base_system()
- ## WORKAROUND: https://github.com/systemd/systemd/issues/13603#issuecomment-552246188
- print('[N] Setting up bootloader.')
- setup_bootloader()
-
- if args['aur-support']:
- print('[N] AUR support demanded, building "yay" before running POST steps.')
- add_AUR_support()
- print('[N] AUR support added. use "yay -Syy --noconfirm <package>" to deploy in POST.')
-
- ## == Passwords
- # o = sys_command('arch-chroot /mnt usermod --password {} root'.format(args['password']))
- # o = sys_command("arch-chroot /mnt sh -c 'echo {pin} | passwd --stdin root'".format(pin='"{pin}"'.format(**args, pin=args['password'])), echo=True)
- set_password(user='root', password=args['password'])
- time.sleep(5)
- if 'user' in args:
- create_user(args['user'], args['password'])#, groups=['wheel'])
-
- print('[N] Running post installation steps.')
- run_post_install_steps()
- time.sleep(2)
-
- if args['aur-support'] and not args['aur-keep']:
- o = b''.join(sys_command('/usr/bin/sed -i \'s/%wheel ALL=(ALL) NO/# %wheel ALL=(ALL) NO/\' /mnt/etc/sudoers'))
- o = b''.join(sys_command('/usr/bin/arch-chroot /mnt sh -c "userdel aibuilder"'))
- o = b''.join(sys_command('/usr/bin/arch-chroot /mnt sh -c "rm -rf /home/aibuilder"'))
-
- if args['phone-home']:
- phone_home(args['phone-home'])
-
- if args['post'] == 'reboot':
- o = simple_command('/usr/bin/umount -R /mnt')
- o = simple_command('/usr/bin/reboot now')
- else:
- print('Done. "umount -R /mnt; reboot" when you\'re done tinkering.')
diff --git a/archinstall/__init__.py b/archinstall/__init__.py
new file mode 100644
index 00000000..9cf7faec
--- /dev/null
+++ b/archinstall/__init__.py
@@ -0,0 +1,7 @@
+from .lib.general import *
+from .lib.disk import *
+from .lib.user_interaction import *
+from .lib.exceptions import *
+from .lib.installer import *
+from .lib.profiles import *
+from .lib.luks import * \ No newline at end of file
diff --git a/archinstall/lib/__init__.py b/archinstall/lib/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/archinstall/lib/__init__.py
diff --git a/archinstall/lib/disk.py b/archinstall/lib/disk.py
new file mode 100644
index 00000000..1bdff8e2
--- /dev/null
+++ b/archinstall/lib/disk.py
@@ -0,0 +1,210 @@
+import glob, re, os, json
+from collections import OrderedDict
+from .exceptions import *
+from .general import sys_command
+
+ROOT_DIR_PATTERN = re.compile('^.*?/devices')
+GPT = 0b00000001
+
+#import ctypes
+#import ctypes.util
+#libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+#libc.mount.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p)
+
class BlockDevice():
	"""A block device (disk, loop device or crypt mapping) as reported by
	lsblk, plus a cache of the Partition objects found on it."""

	def __init__(self, path, info):
		# path: the /dev node (e.g. /dev/sda); info: the lsblk JSON entry for it.
		self.path = path
		self.info = info
		self.part_cache = OrderedDict()

	@property
	def device(self):
		"""
		Returns the actual device-endpoint of the BlockDevice.
		If it's a loop-back-device it returns the back-file,
		If it's a ATA-drive it returns the /dev/X device
		And if it's a crypto-device it returns the parent device
		"""
		if not 'type' in self.info: raise DiskError(f'Could not locate backplane info for "{self.path}"')

		if self.info['type'] == 'loop':
			# Ask losetup for all loop devices and match ours by name.
			for drive in json.loads(b''.join(sys_command(f'losetup --json', hide_from_log=True)).decode('UTF_8'))['loopdevices']:
				if not drive['name'] == self.path: continue

				return drive['back-file']
		elif self.info['type'] == 'disk':
			return self.path
		elif self.info['type'] == 'crypt':
			if not 'pkname' in self.info: raise DiskError(f'A crypt device ({self.path}) without a parent kernel device name.')
			return f"/dev/{self.info['pkname']}"

		# NOTE(review): implicitly returns None for any other device type,
		# and for a loop device that losetup no longer lists.
		# if not stat.S_ISBLK(os.stat(full_path).st_mode):
		# 	raise DiskError(f'Selected disk "{full_path}" is not a block device.')

	@property
	def partitions(self):
		"""Re-probe the device and return {part_id: Partition}, sorted by id."""
		# partprobe forces the kernel to re-read the partition table first.
		o = b''.join(sys_command(f'partprobe {self.path}'))

		#o = b''.join(sys_command('/usr/bin/lsblk -o name -J -b {dev}'.format(dev=dev)))
		o = b''.join(sys_command(f'/usr/bin/lsblk -J {self.path}'))
		if b'not a block device' in o:
			raise DiskError(f'Can not read partitions off something that isn\'t a block device: {self.path}')

		if not o[:1] == b'{':
			raise DiskError(f'Error getting JSON output from:', f'/usr/bin/lsblk -J {self.path}')

		r = json.loads(o.decode('UTF-8'))
		if len(r['blockdevices']) and 'children' in r['blockdevices'][0]:
			root_path = f"/dev/{r['blockdevices'][0]['name']}"
			for part in r['blockdevices'][0]['children']:
				# Partition id = child name minus the parent device name
				# (e.g. "sda1" on "sda" -> "1").
				part_id = part['name'][len(os.path.basename(self.path)):]
				if part_id not in self.part_cache:
					## TODO: Force over-write even if in cache?
					self.part_cache[part_id] = Partition(root_path + part_id, part_id=part_id, size=part['size'])

		return {k: self.part_cache[k] for k in sorted(self.part_cache)}

	@property
	def partition(self):
		# Same data as .partitions, but flattened to a list (sorted by part id).
		all_partitions = self.partitions
		return [all_partitions[k] for k in all_partitions]

	def __repr__(self, *args, **kwargs):
		return f"BlockDevice({self.device})"

	def __getitem__(self, key, *args, **kwargs):
		# Dict-style access to the raw lsblk info fields (size, type, ...).
		if not key in self.info:
			raise KeyError(f'{self} does not contain information: "{key}"')
		return self.info[key]
+
class Partition():
	def __init__(self, path, part_id=None, size=-1, filesystem=None, mountpoint=None):
		"""A single partition on a block device.

		:param path: Full device node, e.g. /dev/sda1.
		:param part_id: Partition identifier; defaults to the path's basename.
		:param size: Size as reported by lsblk (-1 = unknown).
		:param filesystem: Filesystem name, once formatted or known.
		:param mountpoint: Where the partition is currently mounted, if at all.
		"""
		if not part_id: part_id = os.path.basename(path)
		self.path = path
		self.part_id = part_id
		self.mountpoint = mountpoint
		self.filesystem = filesystem # TODO: Autodetect if we're reusing a partition
		self.size = size # TODO: Refresh?

	def __repr__(self, *args, **kwargs):
		return f'Partition({self.path}, fs={self.filesystem}, mounted={self.mountpoint})'

	def format(self, filesystem):
		"""Format the partition as `filesystem` ('btrfs' or 'fat32').

		Returns True on success; raises DiskError on failure or on an
		unsupported filesystem name.
		"""
		print(f'Formatting {self} -> {filesystem}')
		if filesystem == 'btrfs':
			o = b''.join(sys_command(f'/usr/bin/mkfs.btrfs -f {self.path}'))
			# mkfs.btrfs prints the new filesystem UUID on success.
			if not b'UUID' in o:
				raise DiskError(f'Could not format {self.path} with {filesystem} because: {o}')
			self.filesystem = 'btrfs'
		elif filesystem == 'fat32':
			o = b''.join(sys_command(f'/usr/bin/mkfs.vfat -F32 {self.path}'))
			if (b'mkfs.fat' not in o and b'mkfs.vfat' not in o) or b'command not found' in o:
				raise DiskError(f'Could not format {self.path} with {filesystem} because: {o}')
			self.filesystem = 'fat32'
		else:
			raise DiskError(f'Fileformat {filesystem} is not yet implemented.')
		return True

	def mount(self, target, fs=None, options=''):
		"""Mount the partition at `target`.

		Returns True on success; implicitly returns None when the partition
		is already mounted or the mount command fails.
		NOTE(review): `options` is unused by the sys_command fallback below.
		"""
		if not self.mountpoint:
			print(f'Mounting {self} to {target}')
			if not fs:
				if not self.filesystem: raise DiskError(f'Need to format (or define) the filesystem on {self} before mounting.')
				fs = self.filesystem
			## libc has some issues with loop devices, defaulting back to sys calls
			# ret = libc.mount(self.path.encode(), target.encode(), fs.encode(), 0, options.encode())
			# if ret < 0:
			# 	errno = ctypes.get_errno()
			# 	raise OSError(errno, f"Error mounting {self.path} ({fs}) on {target} with options '{options}': {os.strerror(errno)}")
			if sys_command(f'/usr/bin/mount {self.path} {target}').exit_code == 0:
				self.mountpoint = target
				return True
+
class Filesystem():
	# TODO:
	#   When instance of a HDD is selected, check all usages and gracefully unmount them
	#   as well as close any crypto handles.
	"""Context manager owning the partition table of a BlockDevice.

	Entering writes a fresh partition label (destroying any existing table);
	partitions are then created through the parted() helpers.
	"""
	def __init__(self, blockdevice, mode=GPT):
		self.blockdevice = blockdevice
		self.mode = mode  # Partition-table type; only GPT is implemented.

	def __enter__(self, *args, **kwargs):
		if self.mode == GPT:
			if sys_command(f'/usr/bin/parted -s {self.blockdevice.device} mklabel gpt',).exit_code == 0:
				return self
			else:
				raise DiskError(f'Problem setting the partition format to GPT:', f'/usr/bin/parted -s {self.blockdevice.device} mklabel gpt')
		else:
			raise DiskError(f'Unknown mode selected to format in: {self.mode}')

	def __exit__(self, *args, **kwargs):
		# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
		if len(args) >= 2 and args[1]:
			raise args[1]
		# Flush kernel buffers so the new table reaches the disk before we continue.
		b''.join(sys_command(f'sync'))
		return True

	def raw_parted(self, string:str):
		# Drain the command's output so it has finished before we return it.
		x = sys_command(f'/usr/bin/parted -s {string}')
		o = b''.join(x)
		return x

	def parted(self, string:str):
		"""
		Performs a parted execution of the given string

		:param string: A raw string passed to /usr/bin/parted -s <string>
		:type string: str
		"""
		return self.raw_parted(string).exit_code

	def use_entire_disk(self, prep_mode=None):
		"""Create a 512MiB EFI boot partition plus a root partition.

		With prep_mode='luks2' the root partition spans the rest of the disk.
		NOTE(review): the non-luks branch uses start='513MiB' end='513MiB',
		i.e. a zero-length partition — looks like it should be '100%';
		confirm before relying on this path.
		"""
		self.add_partition('primary', start='1MiB', end='513MiB', format='fat32')
		self.set_name(0, 'EFI')
		self.set(0, 'boot on')
		self.set(0, 'esp on') # TODO: Redundant, as in GPT mode it's an alias for "boot on"? https://www.gnu.org/software/parted/manual/html_node/set.html
		if prep_mode == 'luks2':
			self.add_partition('primary', start='513MiB', end='100%')
		else:
			self.add_partition('primary', start='513MiB', end='513MiB', format='ext4')

	def add_partition(self, type, start, end, format=None):
		# `format` here is parted's filesystem-type hint only; it does not
		# actually create a filesystem on the partition.
		print(f'Adding partition to {self.blockdevice}')
		if format:
			return self.parted(f'{self.blockdevice.device} mkpart {type} {format} {start} {end}') == 0
		else:
			return self.parted(f'{self.blockdevice.device} mkpart {type} {start} {end}') == 0

	def set_name(self, partition:int, name:str):
		# parted numbers partitions from 1; this API is zero-based.
		return self.parted(f'{self.blockdevice.device} name {partition+1} "{name}"') == 0

	def set(self, partition:int, string:str):
		return self.parted(f'{self.blockdevice.device} set {partition+1} {string}') == 0
+
+def device_state(name, *args, **kwargs):
+ # Based out of: https://askubuntu.com/questions/528690/how-to-get-list-of-all-non-removable-disk-device-names-ssd-hdd-and-sata-ide-onl/528709#528709
+ if os.path.isfile('/sys/block/{}/device/block/{}/removable'.format(name, name)):
+ with open('/sys/block/{}/device/block/{}/removable'.format(name, name)) as f:
+ if f.read(1) == '1':
+ return
+
+ path = ROOT_DIR_PATTERN.sub('', os.readlink('/sys/block/{}'.format(name)))
+ hotplug_buses = ("usb", "ieee1394", "mmc", "pcmcia", "firewire")
+ for bus in hotplug_buses:
+ if os.path.exists('/sys/bus/{}'.format(bus)):
+ for device_bus in os.listdir('/sys/bus/{}/devices'.format(bus)):
+ device_link = ROOT_DIR_PATTERN.sub('', os.readlink('/sys/bus/{}/devices/{}'.format(bus, device_bus)))
+ if re.search(device_link, path):
+ return
+ return True
+
+# lsblk --json -l -n -o path
def all_disks(*args, **kwargs):
	"""Return an OrderedDict mapping /dev paths to BlockDevice objects.

	Pass partitions=True to include partition entries; by default only whole
	devices (lsblk type != 'part') are returned. Remaining *args/**kwargs
	are forwarded to sys_command.
	"""
	if not 'partitions' in kwargs: kwargs['partitions'] = False
	drives = OrderedDict()
	#for drive in json.loads(sys_command(f'losetup --json', *args, **lkwargs, hide_from_log=True)).decode('UTF_8')['loopdevices']:
	for drive in json.loads(b''.join(sys_command(f'lsblk --json -l -n -o path,size,type,mountpoint,label,pkname', *args, **kwargs, hide_from_log=True)).decode('UTF_8'))['blockdevices']:
		if not kwargs['partitions'] and drive['type'] == 'part': continue

		drives[drive['path']] = BlockDevice(drive['path'], drive)
	return drives
diff --git a/archinstall/lib/exceptions.py b/archinstall/lib/exceptions.py
new file mode 100644
index 00000000..9d033147
--- /dev/null
+++ b/archinstall/lib/exceptions.py
@@ -0,0 +1,6 @@
# Fix: these previously derived from BaseException, which `except Exception`
# handlers (and most frameworks) deliberately do NOT catch — BaseException is
# reserved for interpreter-level exits (SystemExit, KeyboardInterrupt).
# User-defined exceptions should derive from Exception (Python docs, "Errors
# and Exceptions"). Class names and raise/catch sites are unchanged.
class RequirementError(Exception):
	"""Raised when a system requirement (e.g. UEFI boot mode) is not met."""
	pass

class DiskError(Exception):
	"""Raised on block-device, partitioning or filesystem failures."""
	pass

class ProfileError(Exception):
	"""Raised when an installation profile cannot be found or applied."""
	pass
diff --git a/archinstall/lib/general.py b/archinstall/lib/general.py
new file mode 100644
index 00000000..89c7f188
--- /dev/null
+++ b/archinstall/lib/general.py
@@ -0,0 +1,204 @@
+import os, json, hashlib, shlex
+import time, pty
+from subprocess import Popen, STDOUT, PIPE, check_output
+from select import epoll, EPOLLIN, EPOLLHUP
+
def log(*args, **kwargs):
	"""Placeholder logger: write all positional args, space-separated, to stdout.

	Keyword arguments (level, origin, ...) are accepted but currently ignored.
	"""
	message = ' '.join(map(str, args))
	print(message)
+
def gen_uid(entropy_length=256):
	"""Return a random 128-character hex identifier.

	The identifier is the SHA-512 digest of `entropy_length` bytes drawn
	from the OS entropy pool, so the output length is fixed regardless of
	`entropy_length`.
	"""
	random_bytes = os.urandom(entropy_length)
	return hashlib.sha512(random_bytes).hexdigest()
+
def multisplit(s, splitters):
	"""Split `s` on every separator in `splitters`, keeping the separators.

	Empty fragments are dropped, separators are emitted as their own tokens:
	multisplit('a-b_c', ['-', '_']) -> ['a', '-', 'b', '_', 'c'].
	"""
	tokens = [s]
	for separator in splitters:
		expanded = []
		for fragment in tokens:
			pieces = fragment.split(separator)
			last = len(pieces) - 1
			for index, piece in enumerate(pieces):
				if piece:
					expanded.append(piece)
				if index < last:
					expanded.append(separator)
		tokens = expanded
	return tokens
+
class sys_command():#Thread):
	"""
	Stolen from archinstall_gui

	Runs a command inside a pseudo-terminal (pty), capturing all output into
	`trace_log` and optionally reacting to output via an expect-style
	`events={trigger: response}` map, writing responses back to the child.
	Execution happens synchronously inside __init__ via run().
	"""
	def __init__(self, cmd, callback=None, start_callback=None, *args, **kwargs):
		# Recognized kwargs (all optional):
		#   worker_id       - id for the per-run cache directory (random if absent)
		#   emulate         - parse and log only; never actually execute
		#   surpress_errors - [sic: typo kept, callers use this spelling] mute
		#                     non-zero exit-code logging
		#   events          - {output-trigger: response} interaction map
		#   debug           - verbose logging of output and trigger writes
		#   on_output       - callback(worker, output) per read chunk
		#   ignore_errors   - force exit_code to 0
		if not 'worker_id' in kwargs: kwargs['worker_id'] = gen_uid()
		if not 'emulate' in kwargs: kwargs['emulate'] = False
		if not 'surpress_errors' in kwargs: kwargs['surpress_errors'] = False
		if kwargs['emulate']:
			log(f"Starting command '{cmd}' in emulation mode.")
		self.raw_cmd = cmd
		try:
			self.cmd = shlex.split(cmd)
		except Exception as e:
			raise ValueError(f'Incorrect string to split: {cmd}\n{e}')
		self.args = args
		self.kwargs = kwargs
		if not 'worker' in self.kwargs: self.kwargs['worker'] = None
		self.callback = callback
		self.pid = None
		self.exit_code = None
		self.started = time.time()
		self.ended = None
		self.worker_id = kwargs['worker_id']
		self.trace_log = b''      # Every byte the child ever wrote.
		self.status = 'starting'

		# Each invocation gets its own working directory under the user's
		# archinstall cache, where trace.log is written afterwards.
		user_catalogue = os.path.expanduser('~')
		self.cwd = f"{user_catalogue}/archinstall/cache/workers/{kwargs['worker_id']}/"
		self.exec_dir = f'{self.cwd}/{os.path.basename(self.cmd[0])}_workingdir'

		if not self.cmd[0][0] == '/':
			# Relative binary name: resolve to an absolute path via which(1),
			# since os.execv() below does not search $PATH.
			#log('Worker command is not executed with absolute path, trying to find: {}'.format(self.cmd[0]), origin='spawn', level=5)
			o = check_output(['/usr/bin/which', self.cmd[0]])
			#log('This is the binary {} for {}'.format(o.decode('UTF-8'), self.cmd[0]), origin='spawn', level=5)
			self.cmd[0] = o.decode('UTF-8').strip()

		if not os.path.isdir(self.exec_dir):
			os.makedirs(self.exec_dir)

		if start_callback: start_callback(self, *args, **kwargs)
		self.run()

	def __iter__(self, *args, **kwargs):
		# Iterating a sys_command yields the captured output line by line.
		for line in self.trace_log.split(b'\n'):
			yield line

	def __repr__(self, *args, **kwargs):
		return f"{self.cmd, self.trace_log}"

	def decode(self, fmt='UTF-8'):
		# Full captured output as text.
		return self.trace_log.decode(fmt)

	def dump(self):
		"""Serialize run metadata (status, timings, output, exit code) to a dict."""
		return {
			'status' : self.status,
			'worker_id' : self.worker_id,
			'worker_result' : self.trace_log.decode('UTF-8'),
			'started' : self.started,
			'ended' : self.ended,
			'started_pprint' : '{}-{}-{} {}:{}:{}'.format(*time.localtime(self.started)),
			'ended_pprint' : '{}-{}-{} {}:{}:{}'.format(*time.localtime(self.ended)) if self.ended else None,
			'exit_code' : self.exit_code
		}

	def run(self):
		"""Fork the child in a pty and pump its output until it exits.

		NOTE(review): uses select.epoll, so this is Linux-only (fine for an
		Arch installer).
		"""
		self.status = 'running'
		old_dir = os.getcwd()
		os.chdir(self.exec_dir)
		self.pid, child_fd = pty.fork()
		if not self.pid: # Child process
			# Replace child process with our main process
			if not self.kwargs['emulate']:
				try:
					os.execv(self.cmd[0], self.cmd)
				except FileNotFoundError:
					self.status = 'done'
					log(f"{self.cmd[0]} does not exist.", origin='spawn', level=2)
					self.exit_code = 1
					return False

		os.chdir(old_dir)

		poller = epoll()
		poller.register(child_fd, EPOLLIN | EPOLLHUP)

		if 'events' in self.kwargs and 'debug' in self.kwargs:
			log(f'[D] Using triggers for command: {self.cmd}')
			log(json.dumps(self.kwargs['events']))

		alive = True
		last_trigger_pos = 0
		while alive and not self.kwargs['emulate']:
			for fileno, event in poller.poll(0.1):
				try:
					output = os.read(child_fd, 8192).strip()
					self.trace_log += output
				except OSError:
					# pty master raises EIO when the child side closes; treat as exit.
					alive = False
					break

				if 'debug' in self.kwargs and self.kwargs['debug'] and len(output):
					log(self.cmd, 'gave:', output.decode('UTF-8'))

				if 'on_output' in self.kwargs:
					self.kwargs['on_output'](self.kwargs['worker'], output)

				lower = output.lower()
				broke = False
				if 'events' in self.kwargs:
					for trigger in list(self.kwargs['events']):
						# Normalize triggers and responses to bytes (in place)
						# so they can be matched against the raw pty output.
						if type(trigger) != bytes:
							original = trigger
							trigger = bytes(original, 'UTF-8')
							self.kwargs['events'][trigger] = self.kwargs['events'][original]
							del(self.kwargs['events'][original])
						if type(self.kwargs['events'][trigger]) != bytes:
							self.kwargs['events'][trigger] = bytes(self.kwargs['events'][trigger], 'UTF-8')

						# Case-insensitive search in everything received since
						# the last trigger fired; each trigger fires only once.
						if trigger.lower() in self.trace_log[last_trigger_pos:].lower():
							trigger_pos = self.trace_log[last_trigger_pos:].lower().find(trigger.lower())

							if 'debug' in self.kwargs and self.kwargs['debug']:
								log(f"Writing to subprocess {self.cmd[0]}: {self.kwargs['events'][trigger].decode('UTF-8')}")
								log(f"Writing to subprocess {self.cmd[0]}: {self.kwargs['events'][trigger].decode('UTF-8')}", origin='spawn', level=5)

							last_trigger_pos = trigger_pos
							os.write(child_fd, self.kwargs['events'][trigger])
							del(self.kwargs['events'][trigger])
							broke = True
							break

					if broke:
						continue

					## Adding a exit trigger:
					if len(self.kwargs['events']) == 0:
						if 'debug' in self.kwargs and self.kwargs['debug']:
							log(f"Waiting for last command {self.cmd[0]} to finish.", origin='spawn', level=4)

						# Heuristic: once all triggers fired, a trailing "]$"
						# shell prompt in the last few bytes is taken to mean
						# the interactive session is done.
						if bytes(f']$'.lower(), 'UTF-8') in self.trace_log[0-len(f']$')-5:].lower():
							if 'debug' in self.kwargs and self.kwargs['debug']:
								log(f"{self.cmd[0]} has finished.", origin='spawn', level=4)
							alive = False
							break

		self.status = 'done'

		if 'debug' in self.kwargs and self.kwargs['debug']:
			log(f"{self.cmd[0]} waiting for exit code.", origin='spawn', level=5)

		if not self.kwargs['emulate']:
			try:
				# NOTE(review): os.waitpid() returns the raw 16-bit status word,
				# not the plain exit code — callers compare against 0, which is
				# still correct for "success", but the non-zero values are
				# encoded (exit code is in the high byte per the os docs).
				self.exit_code = os.waitpid(self.pid, 0)[1]
			except ChildProcessError:
				try:
					self.exit_code = os.waitpid(child_fd, 0)[1]
				except ChildProcessError:
					self.exit_code = 1
		else:
			self.exit_code = 0

		if 'ignore_errors' in self.kwargs:
			self.exit_code = 0

		if self.exit_code != 0 and not self.kwargs['surpress_errors']:
			log(f"'{self.raw_cmd}' did not exit gracefully, exit code {self.exit_code}.", origin='spawn', level=3)
			log(self.trace_log.decode('UTF-8'), origin='spawn', level=3)

		self.ended = time.time()
		# Persist the full output for post-mortem inspection.
		with open(f'{self.cwd}/trace.log', 'wb') as fh:
			fh.write(self.trace_log)
+
def prerequisit_check():
	"""Verify the machine is booted in UEFI mode; return True if so.

	Raises RequirementError otherwise.
	NOTE(review): RequirementError lives in .exceptions but is never imported
	by this module — reaching the raise would currently produce a NameError
	instead. (The function name's spelling is kept as-is for callers.)
	"""
	if not os.path.isdir('/sys/firmware/efi'):
		raise RequirementError('Archinstall only supports machines in UEFI mode.')

	return True
+
diff --git a/archinstall/lib/installer.py b/archinstall/lib/installer.py
new file mode 100644
index 00000000..d804818a
--- /dev/null
+++ b/archinstall/lib/installer.py
@@ -0,0 +1,107 @@
+import os, stat
+
+from .exceptions import *
+from .disk import *
+from .general import *
+from .user_interaction import *
+from .profiles import Profile
+
class Installer():
	"""Context manager that installs Arch Linux onto an already-prepared
	partition: mounts it, pacstraps packages, and configures boot/users."""

	def __init__(self, partition, *, profile=None, mountpoint='/mnt', hostname='ArchInstalled'):
		# partition: the (formatted/unlocked) root Partition to install onto.
		# NOTE(review): `hostname` is stored but not applied anywhere in this class yet.
		self.profile = profile
		self.hostname = hostname
		self.mountpoint = mountpoint

		self.partition = partition

	def __enter__(self, *args, **kwargs):
		# Mount the target root before any installation step runs.
		self.partition.mount(self.mountpoint)
		return self

	def __exit__(self, *args, **kwargs):
		# b''.join(sys_command(f'sync')) # No need to, since the underlaying fs() object will call sync.
		# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
		if len(args) >= 2 and args[1]:
			raise args[1]
		print('Installation completed without any errors.')
		return True

	def pacstrap(self, *packages):
		"""Sync pacman databases, then pacstrap `packages` into the mountpoint.

		Accepts either varargs or a single list/tuple. Returns True on
		success, implicitly None on failure (errors are printed, not raised).
		"""
		if type(packages[0]) in (list, tuple): packages = packages[0]
		print(f'Installing packages: {packages}')

		if (sync_mirrors := sys_command('/usr/bin/pacman -Syy')).exit_code == 0:
			if (pacstrap := sys_command(f'/usr/bin/pacstrap {self.mountpoint} {" ".join(packages)}')).exit_code == 0:
				return True
			else:
				print(f'Could not strap in packages: {pacstrap.exit_code}')
		else:
			print(f'Could not sync mirrors: {sync_mirrors.exit_code}')

	def minimal_installation(self):
		# Base system + btrfs tools, bootloader manager and wireless basics.
		return self.pacstrap('base base-devel linux linux-firmware btrfs-progs efibootmgr nano wpa_supplicant dialog'.split(' '))

	def add_bootloader(self, partition):
		"""Install systemd-boot and write a LUKS-root boot entry.

		:param partition: The (encrypted) root partition whose UUID goes into
		                  the cryptdevice= kernel parameter.
		Returns True on success; raises RequirementError when the partition's
		UUID cannot be found under /dev/disk/by-uuid.
		"""
		print(f'Adding bootloader to {partition}')
		os.makedirs(f'{self.mountpoint}/boot', exist_ok=True)
		partition.mount(f'{self.mountpoint}/boot')
		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} bootctl --no-variables --path=/boot install'))

		with open(f'{self.mountpoint}/boot/loader/loader.conf', 'w') as loader:
			loader.write('default arch\n')
			loader.write('timeout 5\n')

		## For some reason, blkid and /dev/disk/by-uuid are not getting along well.
		## And blkid is wrong in terms of LUKS.
		#UUID = sys_command('blkid -s PARTUUID -o value {drive}{partition_2}'.format(**args)).decode('UTF-8').strip()
		with open(f'{self.mountpoint}/boot/loader/entries/arch.conf', 'w') as entry:
			entry.write('title Arch Linux\n')
			entry.write('linux /vmlinuz-linux\n')
			entry.write('initrd /initramfs-linux.img\n')
			## blkid doesn't trigger on loopback devices really well,
			## so we'll use the old manual method until we get that sorted out.
			# UUID = simple_command(f"blkid -s PARTUUID -o value /dev/{os.path.basename(args['drive'])}{args['partitions']['2']}").decode('UTF-8').strip()
			# entry.write('options root=PARTUUID={UUID} rw intel_pstate=no_hwp\n'.format(UUID=UUID))
			# Resolve the partition's UUID by matching the by-uuid symlinks
			# back to the partition's device node.
			for root, folders, uids in os.walk('/dev/disk/by-uuid'):
				for uid in uids:
					real_path = os.path.realpath(os.path.join(root, uid))
					if not os.path.basename(real_path) == os.path.basename(partition.path): continue

					entry.write(f'options cryptdevice=UUID={uid}:luksdev root=/dev/mapper/luksdev rw intel_pstate=no_hwp\n')
					return True
					break  # NOTE(review): unreachable after the return above.
			raise RequirementError(f'Could not identify the UUID of {partition}, there for {self.mountpoint}/boot/loader/entries/arch.conf will be broken until fixed.')

	def add_additional_packages(self, *packages):
		# Thin forwarding wrapper; see pacstrap() for semantics.
		self.pacstrap(*packages)

	def install_profile(self, profile):
		# Wraps the given profile name/spec in a Profile bound to this installer.
		profile = Profile(self, profile)

		print(f'Installing network profile {profile}')
		profile.install()

	def user_create(self, user :str, password=None, groups=[]):
		"""Create `user` inside the chroot, optionally set password and groups.

		NOTE(review): the useradd call always adds the user to `wheel`,
		regardless of `groups`; and the mutable default `groups=[]` is shared
		between calls (harmless here since it is only read).
		"""
		print(f'Creating user {user}')
		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} useradd -m -G wheel {user}'))
		if password:
			self.user_set_pw(user, password)
		if groups:
			for group in groups:
				o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} gpasswd -a {user} {group}'))

	def user_set_pw(self, user, password):
		# Sets the password non-interactively via chpasswd inside the chroot.
		print(f'Setting password for {user}')
		o = b''.join(sys_command(f"/usr/bin/arch-chroot {self.mountpoint} sh -c \"echo '{user}:{password}' | chpasswd\""))
		pass

	def add_AUR_support(self):
		"""Build and install yay in the chroot via a throwaway 'aibuilder' user.

		Temporarily grants wheel passwordless-ish sudo, builds yay, then
		removes the build user again.
		NOTE(review): the final cleanup deletes '/hoem/aibuilder' [sic] — the
		typo'd path means /home/aibuilder is left behind here (callers appear
		to clean it up separately).
		"""
		print(f'Building and installing yay support into {self.mountpoint}')
		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} sh -c "useradd -m -G wheel aibuilder"'))
		o = b''.join(sys_command(f"/usr/bin/sed -i 's/# %wheel ALL=(ALL) NO/%wheel ALL=(ALL) NO/' {self.mountpoint}/etc/sudoers"))

		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} sh -c "su - aibuilder -c \\"(cd /home/aibuilder; git clone https://aur.archlinux.org/yay.git)\\""'))
		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} sh -c "chown -R aibuilder.aibuilder /home/aibuilder/yay"'))
		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} sh -c "su - aibuilder -c \\"(cd /home/aibuilder/yay; makepkg -si --noconfirm)\\" >/dev/null"'))

		o = b''.join(sys_command(f'/usr/bin/arch-chroot {self.mountpoint} sh -c "userdel aibuilder; rm -rf /hoem/aibuilder"'))
diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py
new file mode 100644
index 00000000..707eeeab
--- /dev/null
+++ b/archinstall/lib/luks.py
@@ -0,0 +1,53 @@
+import os
+from .exceptions import *
+from .general import sys_command
+from .disk import Partition
+
class luks2():
	"""
	Context manager that encrypts a partition as LUKS2 and unlocks it.

	``with luks2(partition, 'luksloop', pw) as unlocked:`` formats the
	partition (destructive!) on entry and yields the resulting
	/dev/mapper Partition.
	"""
	def __init__(self, partition, mountpoint, password, *args, **kwargs):
		self.password = password
		self.partition = partition
		self.mountpoint = mountpoint  # mapper name (e.g. 'luksloop'), not a filesystem path
		self.args = args
		self.kwargs = kwargs

	def __enter__(self):
		key_file = self.encrypt(self.partition, self.password, *self.args, **self.kwargs)
		return self.unlock(self.partition, self.mountpoint, key_file)

	def __exit__(self, *args, **kwargs):
		# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
		# args is (exc_type, exc_value, traceback); re-raise any in-body exception.
		if len(args) >= 2 and args[1]:
			raise args[1]
		return True

	def encrypt(self, partition, password, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):
		"""
		luksFormat *partition* as LUKS2 and return the path of the key file used.

		NOTE(review): the plaintext password is written to /tmp and never
		deleted — confirm whether the key file should be shredded afterwards.
		"""
		print(f'Encrypting {partition}')
		if not key_file:
			key_file = f'/tmp/{os.path.basename(self.partition.path)}.disk_pw'  # TODO: Make disk-pw-file randomly unique?
		if type(password) != bytes:
			password = bytes(password, 'UTF-8')

		with open(key_file, 'wb') as fh:
			fh.write(password)

		o = b''.join(sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}'))
		if not b'Command successful.' in o:
			raise DiskError(f'Could not encrypt volume "{partition.path}": {o}')

		return key_file

	def unlock(self, partition, mountpoint, key_file):
		"""
		Opens a luks2 compatible partition under /dev/mapper/<mountpoint>.
		Keyfile must be specified as there's no way to interact with the pw-prompt atm.

		:param mountpoint: The name without absolute path, for instance "luksdev" will point to /dev/mapper/luksdev
		:type mountpoint: str
		"""
		# Fix: the original called os.path.basename() but discarded the result,
		# so a path-like name slipped through to cryptsetup unchanged.
		if '/' in mountpoint:
			mountpoint = os.path.basename(mountpoint)  # TODO: Raise exception instead?
		sys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')
		if os.path.islink(f'/dev/mapper/{mountpoint}'):
			return Partition(f'/dev/mapper/{mountpoint}')

	def close(self, mountpoint):
		"""Close the mapper device; returns True when the mapping is gone."""
		sys_command(f'cryptsetup close /dev/mapper/{mountpoint}')
		return os.path.islink(f'/dev/mapper/{mountpoint}') is False
diff --git a/archinstall/lib/profiles.py b/archinstall/lib/profiles.py
new file mode 100644
index 00000000..bea17d44
--- /dev/null
+++ b/archinstall/lib/profiles.py
@@ -0,0 +1,195 @@
import os, urllib.request, urllib.parse, ssl, json
from collections import OrderedDict
from .general import multisplit, sys_command, log
from .exceptions import *

# Root of the hosted profile repository, used as a fallback whenever a
# profile/application JSON is not found on the local filesystem.
UPSTREAM_URL = 'https://raw.githubusercontent.com/Torxed/archinstall/master/profiles'
+
def grab_url_data(path):
	"""Fetch *path* over HTTP(S) and return the raw response bytes.

	Every component after the scheme is percent-quoted; the delimiters
	'/', '?', '=' and '&' are preserved as-is.

	NOTE(review): TLS certificate verification is deliberately disabled
	below — any server can impersonate the upstream profile repository.
	"""
	scheme_end = path.find(':') + 1
	delimiters = ('/', '?', '=', '&')

	quoted_parts = []
	for token in multisplit(path[scheme_end:], delimiters):
		quoted_parts.append(token if token in delimiters else urllib.parse.quote(token))
	safe_path = path[:scheme_end] + ''.join(quoted_parts)

	context = ssl.create_default_context()
	context.check_hostname = False
	context.verify_mode = ssl.CERT_NONE
	response = urllib.request.urlopen(safe_path, context=context)
	return response.read()
+
def get_application_instructions(target):
	"""
	Locate and parse the JSON instructions for application profile *target*.

	Search order: local applications/ directories, then the upstream profile
	repository, then a legacy ./profiles/applications fallback. Returns a
	dict (possibly empty when nothing was found). Exits the process on a
	JSON syntax error, preserving the original fail-fast behaviour.

	Fix: the original referenced ``self`` from this module-level function and
	the undefined names ``oDict``/``traceback``/``args``, so it raised
	NameError on every call; the dead copy-pasted block has been removed.
	"""
	instructions = {}

	# Prefer a local copy of the instructions, if one exists.
	for path in ['./', './profiles', '/etc/archinstall', '/etc/archinstall/profiles']:
		if os.path.isfile(f'{path}/applications/{target}.json'):
			with open(f'{path}/applications/{target}.json', 'r') as fh:
				instructions = fh.read()
			print('[N] Found local application instructions for: {}'.format(target))
			break

	if not instructions:
		try:
			instructions = grab_url_data(f'{UPSTREAM_URL}/applications/{target}.json').decode('UTF-8')
			print('[N] Found application instructions for: {}'.format(target))
		except urllib.error.HTTPError:
			print('[N] Could not find remote instructions. Trying local instructions under ./profiles/applications')
			local_path = './profiles/applications' if os.path.isfile('./archinstall.py') else './archinstall/profiles/applications'  # Dangerous assumption
			if os.path.isfile(f'{local_path}/{target}.json'):
				with open(f'{local_path}/{target}.json', 'r') as fh:
					instructions = fh.read()
				print('[N] Found local application instructions for: {}'.format(target))
			else:
				print('[N] No instructions found for: {}'.format(target))
				return instructions

	try:
		# object_pairs_hook keeps the instructions in file order (OrderedDict,
		# not the undefined 'oDict' the original referenced).
		instructions = json.loads(instructions, object_pairs_hook=OrderedDict)
	except ValueError:
		import traceback  # was referenced but never imported at module level
		print('[E] JSON syntax error in {}'.format(f'{UPSTREAM_URL}/applications/{target}.json'))
		traceback.print_exc()
		exit(1)

	return instructions
+
class Profile():
	"""
	A named installation profile: a JSON document of post-install steps,
	resolved from the local filesystem or the upstream repository and
	executed against an Installer instance.
	"""
	def __init__(self, installer, name, args=None):
		self.name = name
		self.installer = installer
		self._cache = None
		# Fix: the original used a mutable default (args={}) shared between
		# all Profile instances that omitted the argument.
		self.args = args if args is not None else {}

	def __repr__(self, *args, **kwargs):
		# NOTE: evaluating self.path may hit the filesystem and network.
		return f'Profile({self.name} <"{self.path}">)'

	@property
	def path(self, *args, **kwargs):
		"""Absolute local path (or upstream URL) of this profile's JSON, or None."""
		for path in ['./', './profiles', '/etc/archinstall', '/etc/archinstall/profiles']:
			if os.path.isfile(f'{path}/{self.name}.json'):
				return os.path.abspath(f'{path}/{self.name}.json')

		# Fix: the original contained this try-block twice, byte-identical
		# (same URL, same handler); the duplicate was dead and is removed.
		try:
			if (cache := grab_url_data(f'{UPSTREAM_URL}/{self.name}.json')):
				self._cache = cache
				return f'{UPSTREAM_URL}/{self.name}.json'
		except urllib.error.HTTPError:
			pass

		return None

	def load_instructions(self):
		"""Return the parsed JSON instructions, from cache (remote) or disk (local)."""
		if (absolute_path := self.path):
			if absolute_path[:4] == 'http':
				return json.loads(self._cache)

			with open(absolute_path, 'r') as fh:
				return json.load(fh)

		raise ProfileError(f'No such profile ({self.name}) was found either locally or in {UPSTREAM_URL}')

	def install(self):
		"""Execute this profile's post-install steps against self.installer."""
		instructions = self.load_instructions()
		if 'args' in instructions:
			self.args = instructions['args']
		if 'post' in instructions:
			instructions = instructions['post']

		for title in instructions:
			log(f'Running post installation step {title}')

			print('[N] Network Deploy: {}'.format(title))
			if type(instructions[title]) == str:
				# A plain string value names an application profile to install.
				print('[N] Loading {} configuration'.format(instructions[title]))
				log(f'Loading {instructions[title]} configuration')
				instructions[title] = Application(self.installer, instructions[title], args=self.args)
				instructions[title].install()
			else:
				for command in instructions[title]:
					raw_command = command
					opts = instructions[title][command] if type(instructions[title][command]) in (dict, OrderedDict) else {}
					if len(opts):
						if 'pass-args' in opts or 'format' in opts:
							command = command.format(**self.args)
							## FIXME: Instead of deleting the two options
							## in order to mute command output further down,
							## check for a 'debug' flag per command and delete these two
							if 'pass-args' in opts:
								del(opts['pass-args'])
							elif 'format' in opts:
								del(opts['format'])

					if 'pass-args' in opts and opts['pass-args']:
						command = command.format(**self.args)

					if 'runas' in opts and f'su - {opts["runas"]} -c' not in command:
						command = command.replace('"', '\\"')
						command = f'su - {opts["runas"]} -c "{command}"'

					if 'no-chroot' in opts and opts['no-chroot']:
						log(f'Executing {command} as simple command from live-cd.')
						o = sys_command(command, opts)
					elif 'chroot' in opts and opts['chroot']:
						log(f'Executing {command} in chroot.')
						## Run in a manually set up version of arch-chroot (arch-chroot will break namespaces).
						## This is a bit risky in case the file systems changes over the years, but we'll probably be safe adding this as an option.
						## **> Prefer if possible to use 'no-chroot' instead which "live boots" the OS and runs the command.
						o = sys_command(f"mount /dev/mapper/luksdev {self.installer.mountpoint}")
						o = sys_command(f"cd {self.installer.mountpoint}; cp /etc/resolv.conf etc")
						o = sys_command(f"cd {self.installer.mountpoint}; mount -t proc /proc proc")
						o = sys_command(f"cd {self.installer.mountpoint}; mount --make-rslave --rbind /sys sys")
						o = sys_command(f"cd {self.installer.mountpoint}; mount --make-rslave --rbind /dev dev")
						o = sys_command(f'chroot {self.installer.mountpoint} /bin/bash -c "{command}"')
						o = sys_command(f"cd {self.installer.mountpoint}; umount -R dev")
						o = sys_command(f"cd {self.installer.mountpoint}; umount -R sys")
						o = sys_command(f"cd {self.installer.mountpoint}; umount -R proc")
					else:
						if 'boot' in opts and opts['boot']:
							log(f'Executing {command} in boot mode.')
							defaults = {
								'login:' : 'root\n',
								'Password:' : self.args['password']+'\n',
								f'[root@{self.args["hostname"]} ~]#' : command+'\n',
							}
							if not 'events' in opts: opts['events'] = {}
							events = {**defaults, **opts['events']}
							del(opts['events'])
							o = b''.join(sys_command(f'/usr/bin/systemd-nspawn -D {self.installer.mountpoint} -b --machine temporary', events=events))
						else:
							log(f'Executing {command} in with systemd-nspawn without boot.')
							o = b''.join(sys_command(f'/usr/bin/systemd-nspawn -D {self.installer.mountpoint} --machine temporary {command}'))
					# Fix: 'instructions' was already narrowed to the 'post' section
					# above, so the original instructions['post'][title][raw_command]
					# lookup raised KeyError on every command with expected output.
					if type(instructions[title][raw_command]) == bytes and len(instructions[title][raw_command]) and not instructions[title][raw_command] in o:
						log(f'{command} failed: {o.decode("UTF-8")}')
						print('[W] Post install command failed: {}'.format(o.decode('UTF-8')))
+
class Application(Profile):
	"""A Profile whose JSON is resolved under applications/ directories instead."""
	@property
	def path(self, *args, **kwargs):
		"""Absolute local path (or upstream URL) of this application's JSON, or None."""
		local_dirs = (
			'./applications',
			'./profiles/applications',
			'/etc/archinstall/applications',
			'/etc/archinstall/profiles/applications',
		)
		for directory in local_dirs:
			candidate = f'{directory}/{self.name}.json'
			if os.path.isfile(candidate):
				return os.path.abspath(candidate)

		# Fall back to the upstream repository: top level first, then applications/.
		for remote in (f'{UPSTREAM_URL}/{self.name}.json', f'{UPSTREAM_URL}/applications/{self.name}.json'):
			try:
				if (cache := grab_url_data(remote)):
					self._cache = cache
					return remote
			except urllib.error.HTTPError:
				pass

		return None
diff --git a/archinstall/lib/user_interaction.py b/archinstall/lib/user_interaction.py
new file mode 100644
index 00000000..bd6d117c
--- /dev/null
+++ b/archinstall/lib/user_interaction.py
@@ -0,0 +1,17 @@
+from .exceptions import *
+
def select_disk(dict_o_disks):
	"""
	Prompt the user to pick one disk out of *dict_o_disks* (name -> drive object).

	Returns the selected drive object. Raises DiskError when the dict is
	empty or the selection is invalid.

	Fixes: the original raised the "non-empty dictionary" error even when
	exactly one disk was present (now auto-selected), and an out-of-range
	numeric answer raised IndexError instead of DiskError.
	"""
	drives = sorted(list(dict_o_disks.keys()))
	if len(drives) == 0:
		raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')

	if len(drives) == 1:
		# Nothing to choose between; return the only disk.
		return dict_o_disks[drives[0]]

	for index, drive in enumerate(drives):
		print(f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})")
	drive = input('Select one of the above disks (by number or full path): ')
	if drive.isdigit():
		if int(drive) >= len(drives):
			raise DiskError(f'Selected drive does not exist: "{drive}"')
		drive = dict_o_disks[drives[int(drive)]]
	elif drive in dict_o_disks:
		drive = dict_o_disks[drive]
	else:
		raise DiskError(f'Selected drive does not exist: "{drive}"')
	return drive
diff --git a/deployments/applications/awesome.json b/deployments/applications/awesome.json
deleted file mode 100644
index 81480309..00000000
--- a/deployments/applications/awesome.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "sed -i 's/^twm &/#&/' /etc/X11/xinit/xinitrc" : null,
- "sed -i 's/^xclock/#&/' /etc/X11/xinit/xinitrc" : null,
- "sed -i 's/^xterm/#&/' /etc/X11/xinit/xinitrc" : null,
- "sed -i 's/^exec xterm/#&/' /etc/X11/xinit/xinitrc" : null,
- "sh -c \"echo 'xscreensaver -no-splash &' >> /etc/X11/xinit/xinitrc\"" : null,
- "sh -c \"echo 'exec {_window_manager}' >> /etc/X11/xinit/xinitrc\"" : {"pass-args" : true},
- "sed -i 's/xterm/xterm -ls -xrm \\'XTerm*selectToClipboard: true\\'/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
- "sed -i 's/{ \"open terminal\", terminal/{ \"Chromium\", \"chromium\" },\n &/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
- "sed -i 's/{ \"open terminal\", terminal/{ \"File handler\", \"nemo\" },\n &/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
- "sed -i 's/^globalkeys = gears.table.join(/&\n awful.key({ modkey, }, \"l\", function() awful.spawn(\"xscreensaver-command -lock &\") end),\n/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
- "awk -i inplace -v RS='' '{gsub(/awful.key\\({ modkey,.*?}, \"Tab\",.*?\"client\"}\\),/, \"awful.key({ modkey, }, \"Tab\",\n function ()\n awful.client.focus.byidx(-1)\n if client.focus then\n client.focus:raise()\n end\n end),\n awful.key({ modkey, \"Shift\" }, \"Tab\",\n function ()\n awful.client.focus.byidx(1)\n if client.focus then\n client.focus.raise()\n end\n end),\"); print}' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
- "gsettings set org.nemo.desktop show-desktop-icons false" : null,
- "xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search" : null
-}
diff --git a/description.jpg b/docs/description.jpg
index b05daf2b..b05daf2b 100644
--- a/description.jpg
+++ b/docs/description.jpg
Binary files differ
diff --git a/logo.png b/docs/logo.png
index ac3ed4e8..ac3ed4e8 100644
--- a/logo.png
+++ b/docs/logo.png
Binary files differ
diff --git a/logo.psd b/docs/logo.psd
index d23965b9..d23965b9 100644
--- a/logo.psd
+++ b/docs/logo.psd
Binary files differ
diff --git a/examples/main_example.py b/examples/main_example.py
new file mode 100644
index 00000000..195ee60c
--- /dev/null
+++ b/examples/main_example.py
@@ -0,0 +1,32 @@
# Example: fully guided install onto a single LUKS2-encrypted disk.
import archinstall, getpass

# Unmount and close previous runs
# (best-effort cleanup; 'surpress_errors' matches sys_command()'s keyword spelling)
archinstall.sys_command(f'umount -R /mnt', surpress_errors=True)
archinstall.sys_command(f'cryptsetup close /dev/mapper/luksloop', surpress_errors=True)

# Select a harddrive and a disk password
harddrive = archinstall.select_disk(archinstall.all_disks())
disk_password = getpass.getpass(prompt='Disk password (won\'t echo): ')

with archinstall.Filesystem(harddrive, archinstall.GPT) as fs:
	# Use the entire disk instead of setting up partitions on your own
	fs.use_entire_disk('luks2')

	# Sanity guard: partition[1] is expected to be the root slot; if it reports
	# the 512M size it is actually the boot partition, so abort rather than
	# encrypt it. (assumes use_entire_disk() creates a 512M boot partition
	# first — TODO confirm against Filesystem.use_entire_disk)
	if harddrive.partition[1].size == '512M':
		raise OSError('Trying to encrypt the boot partition for petes sake..')
	# partition[0] is the boot/ESP partition and stays unencrypted.
	harddrive.partition[0].format('fat32')

	# Encrypt + unlock the root partition; 'luksloop' becomes /dev/mapper/luksloop.
	with archinstall.luks2(harddrive.partition[1], 'luksloop', disk_password) as unlocked_device:
		unlocked_device.format('btrfs')

		with archinstall.Installer(unlocked_device, hostname='testmachine') as installation:
			if installation.minimal_installation():
				installation.add_bootloader(harddrive.partition[0])

				installation.add_additional_packages(['nano', 'wget', 'git'])
				installation.install_profile('workstation')

				installation.user_create('anton', 'test')
				installation.user_set_pw('root', 'toor')

				installation.add_AUR_support()
diff --git a/deployments/00:01:23:45:67:89.json b/profiles/00:01:23:45:67:89.json
index 23f83653..23f83653 100644
--- a/deployments/00:01:23:45:67:89.json
+++ b/profiles/00:01:23:45:67:89.json
diff --git a/deployments/00:11:22:33:44:55.json b/profiles/00:11:22:33:44:55.json
index 909b4256..909b4256 100644
--- a/deployments/00:11:22:33:44:55.json
+++ b/profiles/00:11:22:33:44:55.json
diff --git a/deployments/38:00:25:5a:ed:d5.json b/profiles/38:00:25:5a:ed:d5.json
index 3a8e1fb8..3a8e1fb8 100644
--- a/deployments/38:00:25:5a:ed:d5.json
+++ b/profiles/38:00:25:5a:ed:d5.json
diff --git a/profiles/applications/awesome.json b/profiles/applications/awesome.json
new file mode 100644
index 00000000..42715e6f
--- /dev/null
+++ b/profiles/applications/awesome.json
@@ -0,0 +1,17 @@
+{
+ "Installing awesome window manager" : {
+ "sed -i 's/^twm &/#&/' /etc/X11/xinit/xinitrc" : null,
+ "sed -i 's/^xclock/#&/' /etc/X11/xinit/xinitrc" : null,
+ "sed -i 's/^xterm/#&/' /etc/X11/xinit/xinitrc" : null,
+ "sed -i 's/^exec xterm/#&/' /etc/X11/xinit/xinitrc" : null,
+ "sh -c \"echo 'xscreensaver -no-splash &' >> /etc/X11/xinit/xinitrc\"" : null,
+ "sh -c \"echo 'exec {_window_manager}' >> /etc/X11/xinit/xinitrc\"" : {"pass-args" : true},
+ "sed -i 's/xterm/xterm -ls -xrm \"XTerm*selectToClipboard: true\"/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
+ "sed -i 's/{ \"open terminal\", terminal/{ \"Chromium\", \"chromium\" },\n &/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
+ "sed -i 's/{ \"open terminal\", terminal/{ \"File handler\", \"nemo\" },\n &/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
+ "sed -i 's/^globalkeys = gears.table.join(/&\n awful.key({ modkey, }, \"l\", function() awful.spawn(\"xscreensaver-command -lock &\") end),\n/' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
+ "awk -i inplace -v RS='' '{gsub(/awful.key\\({ modkey,.*?}, \"Tab\",.*?\"client\"}\\),/, \"awful.key({ modkey, }, \"Tab\",\n function ()\n awful.client.focus.byidx(-1)\n if client.focus then\n client.focus:raise()\n end\n end),\n awful.key({ modkey, \"Shift\" }, \"Tab\",\n function ()\n awful.client.focus.byidx(1)\n if client.focus then\n client.focus.raise()\n end\n end),\"); print}' /mnt/etc/xdg/awesome/rc.lua" : {"no-chroot" : true},
+ "gsettings set org.nemo.desktop show-desktop-icons false" : null,
+ "xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search" : null
+ }
+}
diff --git a/deployments/applications/gnome.json b/profiles/applications/gnome.json
index 4b568544..4b568544 100644
--- a/deployments/applications/gnome.json
+++ b/profiles/applications/gnome.json
diff --git a/deployments/applications/kde.json b/profiles/applications/kde.json
index 4b568544..4b568544 100644
--- a/deployments/applications/kde.json
+++ b/profiles/applications/kde.json
diff --git a/deployments/applications/postgresql.json b/profiles/applications/postgresql.json
index 05976fd9..05976fd9 100644
--- a/deployments/applications/postgresql.json
+++ b/profiles/applications/postgresql.json
diff --git a/deployments/default.json b/profiles/default.json
index cd205f84..cd205f84 100644
--- a/deployments/default.json
+++ b/profiles/default.json
diff --git a/deployments/desktop_gnome.json b/profiles/desktop_gnome.json
index be239a91..be239a91 100644
--- a/deployments/desktop_gnome.json
+++ b/profiles/desktop_gnome.json
diff --git a/deployments/desktop_kde.json b/profiles/desktop_kde.json
index 6a15bf30..6a15bf30 100644
--- a/deployments/desktop_kde.json
+++ b/profiles/desktop_kde.json
diff --git a/deployments/dns_server.json b/profiles/dns_server.json
index 423fe872..423fe872 100644
--- a/deployments/dns_server.json
+++ b/profiles/dns_server.json
diff --git a/deployments/gitea.json b/profiles/gitea.json
index efb4c15e..efb4c15e 100644
--- a/deployments/gitea.json
+++ b/profiles/gitea.json
diff --git a/deployments/local_mirror.json b/profiles/local_mirror.json
index 79347f8b..79347f8b 100644
--- a/deployments/local_mirror.json
+++ b/profiles/local_mirror.json
diff --git a/deployments/minimal_example.json b/profiles/minimal_example.json
index ec5e7d1c..ec5e7d1c 100644
--- a/deployments/minimal_example.json
+++ b/profiles/minimal_example.json
diff --git a/deployments/pentest.json b/profiles/pentest.json
index 900836ce..900836ce 100644
--- a/deployments/pentest.json
+++ b/profiles/pentest.json
diff --git a/deployments/router.json b/profiles/router.json
index 48e038c0..48e038c0 100644
--- a/deployments/router.json
+++ b/profiles/router.json
diff --git a/deployments/ubuntu.json b/profiles/ubuntu.json
index be239a91..be239a91 100644
--- a/deployments/ubuntu.json
+++ b/profiles/ubuntu.json
diff --git a/deployments/vmhost.json b/profiles/vmhost.json
index 0b2dabec..0b2dabec 100644
--- a/deployments/vmhost.json
+++ b/profiles/vmhost.json
diff --git a/deployments/webserver.json b/profiles/webserver.json
index 6925ab00..6925ab00 100644
--- a/deployments/webserver.json
+++ b/profiles/webserver.json
diff --git a/deployments/workstation.json b/profiles/workstation.json
index 37216b0e..37216b0e 100644
--- a/deployments/workstation.json
+++ b/profiles/workstation.json
diff --git a/deployments/workstation_aur.json b/profiles/workstation_aur.json
index d3c26672..d3c26672 100644
--- a/deployments/workstation_aur.json
+++ b/profiles/workstation_aur.json
diff --git a/deployments/workstation_unattended.json b/profiles/workstation_unattended.json
index 089f7f40..089f7f40 100644
--- a/deployments/workstation_unattended.json
+++ b/profiles/workstation_unattended.json
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..81666ffb
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,22 @@
import setuptools

# The PyPI long description is taken verbatim from the project README.
with open("README.md", "r") as readme:
	long_description = readme.read()

setuptools.setup(
	name="archinstall",
	version="2.0.1",
	author="Anton Hvornum",
	author_email="anton@hvornum.se",
	description="Arch Linux installer - guided, templates etc.",
	long_description=long_description,
	long_description_content_type="text/markdown",
	url="https://github.com/Torxed/archinstall",
	packages=setuptools.find_packages(),
	classifiers=[
		"Programming Language :: Python :: 3.8",
		"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
		"Operating System :: POSIX :: Linux",
	],
	python_requires='>=3.8',
)
diff --git a/test_archinstall.py b/test_archinstall.py
deleted file mode 100644
index 30bc76e7..00000000
--- a/test_archinstall.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import json
-import archinstall
-
-archinstall.update_drive_list(emulate=False)
-archinstall.setup_args_defaults(archinstall.args, interactive=False)
-#for drive in archinstall.harddrives:
-# print(drive, archinstall.human_disk_info(drive))
-
-instructions = archinstall.load_automatic_instructions(emulate=False)
-profile_instructions = archinstall.get_instructions('workstation', emulate=False)
-profile_instructions = archinstall.merge_in_includes(profile_instructions, emulate=False)
-archinstall.args['password'] = 'test'
-
-print(json.dumps(archinstall.args, indent=4)) \ No newline at end of file