update to kubernetes 1.27.4

pull/742/head
Alessandro Lenzen 2023-08-01 15:48:20 +02:00
parent 79a3f79b27
commit af7ffdb8e6
21 changed files with 1181 additions and 1226 deletions

58
.gitignore vendored
View File

@ -1,50 +1,8 @@
admin-csr.json *.csr
admin-key.pem *.json
admin.csr *.kubeconfig
admin.pem *.pem
admin.kubeconfig *.yaml
ca-config.json cfssl
ca-csr.json cfssljson
ca-key.pem kubectl
ca.csr
ca.pem
encryption-config.yaml
kube-controller-manager-csr.json
kube-controller-manager-key.pem
kube-controller-manager.csr
kube-controller-manager.kubeconfig
kube-controller-manager.pem
kube-scheduler-csr.json
kube-scheduler-key.pem
kube-scheduler.csr
kube-scheduler.kubeconfig
kube-scheduler.pem
kube-proxy-csr.json
kube-proxy-key.pem
kube-proxy.csr
kube-proxy.kubeconfig
kube-proxy.pem
kubernetes-csr.json
kubernetes-key.pem
kubernetes.csr
kubernetes.pem
worker-0-csr.json
worker-0-key.pem
worker-0.csr
worker-0.kubeconfig
worker-0.pem
worker-1-csr.json
worker-1-key.pem
worker-1.csr
worker-1.kubeconfig
worker-1.pem
worker-2-csr.json
worker-2-key.pem
worker-2.csr
worker-2.kubeconfig
worker-2.pem
service-account-key.pem
service-account.csr
service-account.pem
service-account-csr.json
*.swp

View File

@ -1,8 +1,10 @@
# Contributing
This project is made possible by contributors like YOU! While all contributions are welcomed, please be sure and follow the following suggestions to help your PR get merged. This project is made possible by contributors like YOU! While all contributions are welcomed, please be sure and follow the following suggestions to help your PR get merged.
## License ## License
This project uses an [Apache license](LICENSE). Be sure you're comfortable with the implications of that before working up a patch. This project uses a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](./LICENSE). Be sure you're comfortable with the implications of that before working up a patch.
## Review and merge process ## Review and merge process
@ -15,4 +17,3 @@ Here are some examples of the review and justification process:
## Notes on minutiae ## Notes on minutiae
If you find a bug that breaks the guide, please do submit it. If you are considering a minor copy edit for tone, grammar, or simple inconsistent whitespace, consider the tradeoff between maintainer time and community benefit before investing too much of your time. If you find a bug that breaks the guide, please do submit it. If you are considering a minor copy edit for tone, grammar, or simple inconsistent whitespace, consider the tradeoff between maintainer time and community benefit before investing too much of your time.

View File

@ -1,3 +1,3 @@
# Copyright # Copyright
<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a> <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.

574
LICENSE
View File

@ -1,202 +1,438 @@
Attribution-NonCommercial-ShareAlike 4.0 International
Apache License =======================================================================
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
1. Definitions. Using Creative Commons Public Licenses
"License" shall mean the terms and conditions for use, reproduction, Creative Commons public licenses provide a standard set of terms and
and distribution as defined by Sections 1 through 9 of this document. conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
"Licensor" shall mean the copyright owner or entity authorized by Considerations for licensors: Our public licenses are
the copyright owner that is granting the License. intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
"Legal Entity" shall mean the union of the acting entity and all Considerations for the public: By using one of our public
other entities that control, are controlled by, or are under common licenses, a licensor grants the public permission to use the
control with that entity. For the purposes of this definition, licensed material under specified terms and conditions. If
"control" means (i) the power, direct or indirect, to cause the the licensor's permission is not necessary for any reason--for
direction or management of such entity, whether by contract or example, because of any applicable exception or limitation to
otherwise, or (ii) ownership of fifty percent (50%) or more of the copyright--then that use is not regulated by the license. Our
outstanding shares, or (iii) beneficial ownership of such entity. licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
"You" (or "Your") shall mean an individual or Legal Entity =======================================================================
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
including but not limited to software source code, documentation Public License
source, and configuration files.
"Object" form shall mean any form resulting from mechanical By exercising the Licensed Rights (defined below), You accept and agree
transformation or translation of a Source form, including but to be bound by the terms and conditions of this Creative Commons
not limited to compiled object code, generated documentation, Attribution-NonCommercial-ShareAlike 4.0 International Public License
and conversions to other media types. ("Public License"). To the extent this Public License may be
interpreted as a contract, You are granted the Licensed Rights in
consideration of Your acceptance of these terms and conditions, and the
Licensor grants You such rights in consideration of benefits the
Licensor receives from making the Licensed Material available under
these terms and conditions.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object Section 1 -- Definitions.
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including a. Adapted Material means material subject to Copyright and Similar
the original version of the Work and any modifications or additions Rights that is derived from or based upon the Licensed Material
to that Work or Derivative Works thereof, that is intentionally and in which the Licensed Material is translated, altered,
submitted to Licensor for inclusion in the Work by the copyright owner arranged, transformed, or otherwise modified in a manner requiring
or by an individual or Legal Entity authorized to submit on behalf of permission under the Copyright and Similar Rights held by the
the copyright owner. For the purposes of this definition, "submitted" Licensor. For purposes of this Public License, where the Licensed
means any form of electronic, verbal, or written communication sent Material is a musical work, performance, or sound recording,
to the Licensor or its representatives, including but not limited to Adapted Material is always produced where the Licensed Material is
communication on electronic mailing lists, source code control systems, synched in timed relation with a moving image.
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity b. Adapter's License means the license You apply to Your Copyright
on behalf of whom a Contribution has been received by Licensor and and Similar Rights in Your contributions to Adapted Material in
subsequently incorporated within the Work. accordance with the terms and conditions of this Public License.
2. Grant of Copyright License. Subject to the terms and conditions of c. BY-NC-SA Compatible License means a license listed at
this License, each Contributor hereby grants to You a perpetual, creativecommons.org/compatiblelicenses, approved by Creative
worldwide, non-exclusive, no-charge, royalty-free, irrevocable Commons as essentially the equivalent of this Public License.
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of d. Copyright and Similar Rights means copyright and/or similar rights
this License, each Contributor hereby grants to You a perpetual, closely related to copyright including, without limitation,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable performance, broadcast, sound recording, and Sui Generis Database
(except as stated in this section) patent license to make, have made, Rights, without regard to how the rights are labeled or
use, offer to sell, sell, import, and otherwise transfer the Work, categorized. For purposes of this Public License, the rights
where such license applies only to those patent claims licensable specified in Section 2(b)(1)-(2) are not Copyright and Similar
by such Contributor that are necessarily infringed by their Rights.
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the e. Effective Technological Measures means those measures that, in the
Work or Derivative Works thereof in any medium, with or without absence of proper authority, may not be circumvented under laws
modifications, and in Source or Object form, provided that You fulfilling obligations under Article 11 of the WIPO Copyright
meet the following conditions: Treaty adopted on December 20, 1996, and/or similar international
agreements.
(a) You must give any other recipients of the Work or f. Exceptions and Limitations means fair use, fair dealing, and/or
Derivative Works a copy of this License; and any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
(b) You must cause any modified files to carry prominent notices g. License Elements means the license attributes listed in the name
stating that You changed the files; and of a Creative Commons Public License. The License Elements of this
Public License are Attribution, NonCommercial, and ShareAlike.
(c) You must retain, in the Source form of any Derivative Works h. Licensed Material means the artistic or literary work, database,
that You distribute, all copyright, patent, trademark, and or other material to which the Licensor applied this Public
attribution notices from the Source form of the Work, License.
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its i. Licensed Rights means the rights granted to You subject to the
distribution, then any Derivative Works that You distribute must terms and conditions of this Public License, which are limited to
include a readable copy of the attribution notices contained all Copyright and Similar Rights that apply to Your use of the
within such NOTICE file, excluding those notices that do not Licensed Material and that the Licensor has authority to license.
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and j. Licensor means the individual(s) or entity(ies) granting rights
may provide additional or different license terms and conditions under this Public License.
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, k. NonCommercial means not primarily intended for or directed towards
any Contribution intentionally submitted for inclusion in the Work commercial advantage or monetary compensation. For purposes of
by You to the Licensor shall be under the terms and conditions of this Public License, the exchange of the Licensed Material for
this License, without any additional terms or conditions. other material subject to Copyright and Similar Rights by digital
Notwithstanding the above, nothing herein shall supersede or modify file-sharing or similar means is NonCommercial provided there is
the terms of any separate license agreement you may have executed no payment of monetary compensation in connection with the
with Licensor regarding such Contributions. exchange.
6. Trademarks. This License does not grant permission to use the trade l. Share means to provide material to the public by any means or
names, trademarks, service marks, or product names of the Licensor, process that requires permission under the Licensed Rights, such
except as required for reasonable and customary use in describing the as reproduction, public display, public performance, distribution,
origin of the Work and reproducing the content of the NOTICE file. dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
7. Disclaimer of Warranty. Unless required by applicable law or m. Sui Generis Database Rights means rights other than copyright
agreed to in writing, Licensor provides the Work (and each resulting from Directive 96/9/EC of the European Parliament and of
Contributor provides its Contributions) on an "AS IS" BASIS, the Council of 11 March 1996 on the legal protection of databases,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or as amended and/or succeeded, as well as other essentially
implied, including, without limitation, any warranties or conditions equivalent rights anywhere in the world.
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, n. You means the individual or entity exercising the Licensed Rights
whether in tort (including negligence), contract, or otherwise, under this Public License. Your has a corresponding meaning.
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS Section 2 -- Scope.
APPENDIX: How to apply the Apache License to your work. a. License grant.
To apply the Apache License to your work, attach the following 1. Subject to the terms and conditions of this Public License,
boilerplate notice, with the fields enclosed by brackets "[]" the Licensor hereby grants You a worldwide, royalty-free,
replaced with your own identifying information. (Don't include non-sublicensable, non-exclusive, irrevocable license to
the brackets!) The text should be enclosed in the appropriate exercise the Licensed Rights in the Licensed Material to:
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner] a. reproduce and Share the Licensed Material, in whole or
in part, for NonCommercial purposes only; and
Licensed under the Apache License, Version 2.0 (the "License"); b. produce, reproduce, and Share Adapted Material for
you may not use this file except in compliance with the License. NonCommercial purposes only.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties, including when
the Licensed Material is used other than for NonCommercial
purposes.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-NC-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database for NonCommercial purposes
only;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,6 +1,6 @@
# Kubernetes The Hard Way # Kubernetes The Hard Way
This tutorial walks you through setting up Kubernetes the hard way. This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster. If that's you then check out [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine), or the [Getting Started Guides](https://kubernetes.io/docs/setup). This tutorial walks you through setting up Kubernetes the hard way. This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster. If that's you then check out [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine), or the [Getting started](https://kubernetes.io/docs/setup/) section of the Kubernetes documentation.
Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster. Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.
@ -8,8 +8,7 @@ Kubernetes The Hard Way is optimized for learning, which means taking the long r
## Copyright ## Copyright
<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>. <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
## Target Audience ## Target Audience
@ -19,27 +18,28 @@ The target audience for this tutorial is someone planning to support a productio
Kubernetes The Hard Way guides you through bootstrapping a highly available Kubernetes cluster with end-to-end encryption between components and RBAC authentication. Kubernetes The Hard Way guides you through bootstrapping a highly available Kubernetes cluster with end-to-end encryption between components and RBAC authentication.
* [kubernetes](https://github.com/kubernetes/kubernetes) v1.21.0 * [etcd](https://github.com/etcd-io/etcd) v3.5.9
* [containerd](https://github.com/containerd/containerd) v1.4.4 * [containerd](https://github.com/containerd/containerd) v1.7.3
* [coredns](https://github.com/coredns/coredns) v1.8.3 * [runc](https://github.com/opencontainers/runc) v1.1.8
* [cni](https://github.com/containernetworking/cni) v0.9.1 * [cni-plugins](https://github.com/containernetworking/plugins) v1.3.0
* [etcd](https://github.com/etcd-io/etcd) v3.4.15 * [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.4
* [coredns](https://github.com/coredns/coredns) v1.10.1
## Labs ## Labs
This tutorial assumes you have access to the [Google Cloud Platform](https://cloud.google.com). While GCP is used for basic infrastructure requirements the lessons learned in this tutorial can be applied to other platforms. This tutorial assumes you have access to the [Google Cloud Platform (GCP)](https://cloud.google.com). While GCP is used for basic infrastructure requirements the lessons learned in this tutorial can be applied to other platforms.
* [Prerequisites](docs/01-prerequisites.md) * [Prerequisites](./docs/01-prerequisites.md)
* [Installing the Client Tools](docs/02-client-tools.md) * [Installing the Client Tools](./docs/02-client-tools.md)
* [Provisioning Compute Resources](docs/03-compute-resources.md) * [Provisioning Compute Resources](./docs/03-compute-resources.md)
* [Provisioning the CA and Generating TLS Certificates](docs/04-certificate-authority.md) * [Provisioning the CA and Generating TLS Certificates](./docs/04-certificate-authority.md)
* [Generating Kubernetes Configuration Files for Authentication](docs/05-kubernetes-configuration-files.md) * [Generating Kubernetes Configuration Files for Authentication](./docs/05-kubernetes-configuration-files.md)
* [Generating the Data Encryption Config and Key](docs/06-data-encryption-keys.md) * [Generating the Data Encryption Config and Key](./docs/06-data-encryption-keys.md)
* [Bootstrapping the etcd Cluster](docs/07-bootstrapping-etcd.md) * [Bootstrapping the etcd Cluster](./docs/07-bootstrapping-etcd.md)
* [Bootstrapping the Kubernetes Control Plane](docs/08-bootstrapping-kubernetes-controllers.md) * [Bootstrapping the Kubernetes Control Plane](./docs/08-bootstrapping-kubernetes-controllers.md)
* [Bootstrapping the Kubernetes Worker Nodes](docs/09-bootstrapping-kubernetes-workers.md) * [Bootstrapping the Kubernetes Worker Nodes](./docs/09-bootstrapping-kubernetes-workers.md)
* [Configuring kubectl for Remote Access](docs/10-configuring-kubectl.md) * [Configuring kubectl for Remote Access](./docs/10-configuring-kubectl.md)
* [Provisioning Pod Network Routes](docs/11-pod-network-routes.md) * [Provisioning Pod Network Routes](./docs/11-pod-network-routes.md)
* [Deploying the DNS Cluster Add-on](docs/12-dns-addon.md) * [Deploying the DNS Cluster Add-on](./docs/12-dns-addon.md)
* [Smoke Test](docs/13-smoke-test.md) * [Smoke Test](./docs/13-smoke-test.md)
* [Cleaning Up](docs/14-cleanup.md) * [Cleaning Up](./docs/14-cleanup.md)

View File

@ -1,206 +0,0 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.32.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. So that the Addon Manager does not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=cluster.local.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-negcache
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns

View File

@ -8,18 +8,31 @@ This tutorial leverages the [Google Cloud Platform](https://cloud.google.com/) t
> The compute resources required for this tutorial exceed the Google Cloud Platform free tier. > The compute resources required for this tutorial exceed the Google Cloud Platform free tier.
## Google Cloud Platform SDK ## Google Cloud Command Line Interface (gcloud CLI)
### Install the Google Cloud SDK ### Install the Google Cloud CLI
Follow the Google Cloud SDK [documentation](https://cloud.google.com/sdk/) to install and configure the `gcloud` command line utility. Follow the gcloud CLI [documentation](https://cloud.google.com/cli) to install and configure the `gcloud` command line utility.
Verify the Google Cloud SDK version is 338.0.0 or higher: Verify the gcloud CLI version is 440.0.0 or higher:
``` ```
gcloud version gcloud version
``` ```
> output
```
Google Cloud SDK 440.0.0
alpha 2023.07.21
beta 2023.07.21
bq 2.0.94
bundled-python3-unix 3.9.16
core 2023.07.21
gcloud-crc32c 1.0.0
gsutil 5.25
```
### Set a Default Compute Region and Zone ### Set a Default Compute Region and Zone
This tutorial assumes a default compute region and zone have been configured. This tutorial assumes a default compute region and zone have been configured.
@ -36,28 +49,31 @@ Then be sure to authorize gcloud to access the Cloud Platform with your Google u
gcloud auth login gcloud auth login
``` ```
Next set a default compute region and compute zone: Next set a default compute region and zone in your local client
``` ```
gcloud config set compute/region us-west1 REGION='us-east1'
```
Set a default compute zone: ZONE='us-east1-d'
``` gcloud config set compute/region "${REGION}"
gcloud config set compute/zone us-west1-c
gcloud config set compute/zone "${ZONE}"
gcloud compute project-info add-metadata \
--metadata "google-compute-default-region=${REGION},google-compute-default-zone=${ZONE}"
``` ```
> Use the `gcloud compute zones list` command to view additional regions and zones. > Use the `gcloud compute zones list` command to view additional regions and zones.
## Running Commands in Parallel with tmux ## Running Commands in Parallel with tmux
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances, in those cases consider using tmux and splitting a window into multiple panes with synchronize-panes enabled to speed up the provisioning process. [tmux](https://tmux.github.io/) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances, in those cases consider using tmux and splitting a window into multiple panes with synchronize-panes enabled to speed up the provisioning process.
> The use of tmux is optional and not required to complete this tutorial. > The use of tmux is optional and not required to complete this tutorial.
![tmux screenshot](images/tmux-screenshot.png) ![tmux screenshot](./images/tmux-screenshot.png)
> Enable synchronize-panes by pressing `ctrl+b` followed by `shift+:`. Next type `set synchronize-panes on` at the prompt. To disable synchronization: `set synchronize-panes off`. > Enable synchronize-panes by pressing `ctrl+b` followed by `shift+:`. Next type `set synchronize-panes on` at the prompt. To disable synchronization: `set synchronize-panes off`.
Next: [Installing the Client Tools](02-client-tools.md) Next: [Installing the Client Tools](./02-client-tools.md)

View File

@ -1,30 +1,30 @@
# Installing the Client Tools # Installing the Client Tools
In this lab you will install the command line utilities required to complete this tutorial: [cfssl](https://github.com/cloudflare/cfssl), [cfssljson](https://github.com/cloudflare/cfssl), and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl). In this lab you will install the command line utilities required to complete this tutorial: [cfssl, cfssljson](https://github.com/cloudflare/cfssl), and [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl).
## Install CFSSL ## Install CFSSL
The `cfssl` and `cfssljson` command line utilities will be used to provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) and generate TLS certificates. The `cfssl` and `cfssljson` command line utilities will be used to provision a [public key infrastructure (PKI)](https://en.wikipedia.org/wiki/Public_key_infrastructure) and generate TLS certificates.
Download and install `cfssl` and `cfssljson`: Download and install `cfssl` and `cfssljson`:
### OS X ### OS X
``` ```
curl -o cfssl https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/darwin/cfssl ARCH='arm64' # replace arm64 with amd64 if needed
curl -o cfssljson https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/darwin/cfssljson
``` curl --location --output cfssl --time-cond cfssl \
"https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_darwin_${ARCH}"
curl --location --output cfssljson --time-cond cfssljson \
"https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_darwin_${ARCH}"
```
chmod +x cfssl cfssljson chmod +x cfssl cfssljson
```
```
sudo mv cfssl cfssljson /usr/local/bin/ sudo mv cfssl cfssljson /usr/local/bin/
``` ```
Some OS X users may experience problems using the pre-built binaries in which case [Homebrew](https://brew.sh) might be a better option: Some OS X users may experience problems using the pre-built binaries in which case [Homebrew](https://github.com/Homebrew/brew) might be a better option:
``` ```
brew install cfssl brew install cfssl
@ -33,22 +33,18 @@ brew install cfssl
### Linux ### Linux
``` ```
wget -q --show-progress --https-only --timestamping \ curl --location --output cfssl --time-cond cfssl \
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssl \ https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64
https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssljson
```
``` curl --location --output cfssljson --time-cond cfssljson \
chmod +x cfssl cfssljson https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64
```
``` sudo install --mode 0755 cfssl cfssljson /usr/local/bin/
sudo mv cfssl cfssljson /usr/local/bin/
``` ```
### Verification ### Verification
Verify `cfssl` and `cfssljson` version 1.4.1 or higher is installed: Verify `cfssl` and `cfssljson` version 1.6.4 or higher is installed:
``` ```
cfssl version cfssl version
@ -57,16 +53,19 @@ cfssl version
> output > output
``` ```
Version: 1.4.1 Version: 1.6.4
Runtime: go1.12.12 Runtime: go1.18
``` ```
``` ```
cfssljson --version cfssljson --version
``` ```
> output
``` ```
Version: 1.4.1 Version: 1.6.4
Runtime: go1.12.12 Runtime: go1.18
``` ```
## Install kubectl ## Install kubectl
@ -76,43 +75,36 @@ The `kubectl` command line utility is used to interact with the Kubernetes API S
### OS X ### OS X
``` ```
curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl curl --location --remote-name --time-cond kubectl \
``` "https://dl.k8s.io/release/v1.27.4/bin/darwin/${ARCH}/kubectl"
```
chmod +x kubectl chmod +x kubectl
```
```
sudo mv kubectl /usr/local/bin/ sudo mv kubectl /usr/local/bin/
``` ```
### Linux ### Linux
``` ```
wget https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl curl --location --remote-name --time-cond kubectl \
``` https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubectl
``` sudo install --mode 0755 kubectl /usr/local/bin/
chmod +x kubectl
```
```
sudo mv kubectl /usr/local/bin/
``` ```
### Verification ### Verification
Verify `kubectl` version 1.21.0 or higher is installed: Verify `kubectl` version 1.27.4 or higher is installed:
``` ```
kubectl version --client kubectl version --client --short
``` ```
> output > output
``` ```
Client Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.0", GitCommit:"cb303e613a121a29364f75cc67d3d580833a7479", GitTreeState:"clean", BuildDate:"2021-04-08T16:31:21Z", GoVersion:"go1.16.1", Compiler:"gc", Platform:"linux/amd64"} Client Version: v1.27.4
Kustomize Version: v5.0.1
``` ```
Next: [Provisioning Compute Resources](03-compute-resources.md) Next: [Provisioning Compute Resources](./03-compute-resources.md)

View File

@ -1,18 +1,18 @@
# Provisioning Compute Resources # Provisioning Compute Resources
Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [compute zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones). Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [compute zone](https://cloud.google.com/compute/docs/regions-zones).
> Ensure a default compute zone and region have been set as described in the [Prerequisites](01-prerequisites.md#set-a-default-compute-region-and-zone) lab. > Ensure a default compute zone and region have been set as described in the [Prerequisites](./01-prerequisites.md#set-a-default-compute-region-and-zone) lab.
## Networking ## Networking
The Kubernetes [networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#kubernetes-model) assumes a flat network in which containers and nodes can communicate with each other. In cases where this is not desired [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can limit how groups of containers are allowed to communicate with each other and external network endpoints. The Kubernetes [network model](https://kubernetes.io/docs/concepts/services-networking/#the-kubernetes-network-model) assumes a flat network in which containers and nodes can communicate with each other. In cases where this is not desired [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can limit how groups of containers are allowed to communicate with each other and external network endpoints.
> Setting up network policies is out of scope for this tutorial. > Setting up network policies is out of scope for this tutorial.
### Virtual Private Cloud Network ### Virtual Private Cloud (VPC) Network
In this section a dedicated [Virtual Private Cloud](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) (VPC) network will be setup to host the Kubernetes cluster. In this section a dedicated [VPC network](https://cloud.google.com/vpc/docs/vpc) will be setup to host the Kubernetes cluster.
Create the `kubernetes-the-hard-way` custom VPC network: Create the `kubernetes-the-hard-way` custom VPC network:
@ -20,7 +20,7 @@ Create the `kubernetes-the-hard-way` custom VPC network:
gcloud compute networks create kubernetes-the-hard-way --subnet-mode custom gcloud compute networks create kubernetes-the-hard-way --subnet-mode custom
``` ```
A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster. A [subnet](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster.
Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network: Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network:
@ -52,12 +52,12 @@ gcloud compute firewall-rules create kubernetes-the-hard-way-allow-external \
--source-ranges 0.0.0.0/0 --source-ranges 0.0.0.0/0
``` ```
> An [external load balancer](https://cloud.google.com/compute/docs/load-balancing/network/) will be used to expose the Kubernetes API Servers to remote clients. > An [external load balancer](https://cloud.google.com/load-balancing/docs/network) will be used to expose the Kubernetes API Servers to remote clients.
List the firewall rules in the `kubernetes-the-hard-way` VPC network: List the firewall rules in the `kubernetes-the-hard-way` VPC network:
``` ```
gcloud compute firewall-rules list --filter="network:kubernetes-the-hard-way" gcloud compute firewall-rules list --filter network:kubernetes-the-hard-way
``` ```
> output > output
@ -65,7 +65,7 @@ gcloud compute firewall-rules list --filter="network:kubernetes-the-hard-way"
``` ```
NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED
kubernetes-the-hard-way-allow-external kubernetes-the-hard-way INGRESS 1000 tcp:22,tcp:6443,icmp False kubernetes-the-hard-way-allow-external kubernetes-the-hard-way INGRESS 1000 tcp:22,tcp:6443,icmp False
kubernetes-the-hard-way-allow-internal kubernetes-the-hard-way INGRESS 1000 tcp,udp,icmp Fals kubernetes-the-hard-way-allow-internal kubernetes-the-hard-way INGRESS 1000 tcp,udp,icmp False
``` ```
### Kubernetes Public IP Address ### Kubernetes Public IP Address
@ -73,26 +73,25 @@ kubernetes-the-hard-way-allow-internal kubernetes-the-hard-way INGRESS 1000
Allocate a static IP address that will be attached to the external load balancer fronting the Kubernetes API Servers: Allocate a static IP address that will be attached to the external load balancer fronting the Kubernetes API Servers:
``` ```
gcloud compute addresses create kubernetes-the-hard-way \ gcloud compute addresses create kubernetes-the-hard-way
--region $(gcloud config get-value compute/region)
``` ```
Verify the `kubernetes-the-hard-way` static IP address was created in your default compute region: Verify the `kubernetes-the-hard-way` static IP address was created in your default compute region:
``` ```
gcloud compute addresses list --filter="name=('kubernetes-the-hard-way')" gcloud compute addresses list --filter name=kubernetes-the-hard-way
``` ```
> output > output
``` ```
NAME ADDRESS/RANGE TYPE PURPOSE NETWORK REGION SUBNET STATUS NAME ADDRESS/RANGE TYPE PURPOSE NETWORK REGION SUBNET STATUS
kubernetes-the-hard-way XX.XXX.XXX.XXX EXTERNAL us-west1 RESERVED kubernetes-the-hard-way XX.XXX.XXX.XXX EXTERNAL us-east1 RESERVED
``` ```
## Compute Instances ## Compute Instances
The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 20.04, which has good support for the [containerd container runtime](https://github.com/containerd/containerd). Each compute instance will be provisioned with a fixed private IP address to simplify the Kubernetes bootstrapping process. The compute instances in this lab will be provisioned using [Ubuntu Server 22.04 LTS](https://ubuntu.com/server), which has good support for the [containerd](https://github.com/containerd/containerd) container runtime. Each compute instance will be provisioned with a fixed private IP address to simplify the Kubernetes bootstrapping process.
### Kubernetes Controllers ### Kubernetes Controllers
@ -100,14 +99,14 @@ Create three compute instances which will host the Kubernetes control plane:
``` ```
for i in 0 1 2; do for i in 0 1 2; do
gcloud compute instances create controller-${i} \ gcloud compute instances create "controller-${i}" \
--async \ --async \
--boot-disk-size 200GB \ --boot-disk-size 200GB \
--can-ip-forward \ --can-ip-forward \
--image-family ubuntu-2004-lts \ --image-family ubuntu-2204-lts \
--image-project ubuntu-os-cloud \ --image-project ubuntu-os-cloud \
--machine-type e2-standard-2 \ --machine-type e2-standard-2 \
--private-network-ip 10.240.0.1${i} \ --private-network-ip "10.240.0.1${i}" \
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
--subnet kubernetes \ --subnet kubernetes \
--tags kubernetes-the-hard-way,controller --tags kubernetes-the-hard-way,controller
@ -124,15 +123,15 @@ Create three compute instances which will host the Kubernetes worker nodes:
``` ```
for i in 0 1 2; do for i in 0 1 2; do
gcloud compute instances create worker-${i} \ gcloud compute instances create "worker-${i}" \
--async \ --async \
--boot-disk-size 200GB \ --boot-disk-size 200GB \
--can-ip-forward \ --can-ip-forward \
--image-family ubuntu-2004-lts \ --image-family ubuntu-2204-lts \
--image-project ubuntu-os-cloud \ --image-project ubuntu-os-cloud \
--machine-type e2-standard-2 \ --machine-type e2-standard-2 \
--metadata pod-cidr=10.200.${i}.0/24 \ --metadata "pod-cidr=10.200.${i}.0/24" \
--private-network-ip 10.240.0.2${i} \ --private-network-ip "10.240.0.2${i}" \
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
--subnet kubernetes \ --subnet kubernetes \
--tags kubernetes-the-hard-way,worker --tags kubernetes-the-hard-way,worker
@ -144,24 +143,24 @@ done
List the compute instances in your default compute zone: List the compute instances in your default compute zone:
``` ```
gcloud compute instances list --filter="tags.items=kubernetes-the-hard-way" gcloud compute instances list --filter tags.items=kubernetes-the-hard-way
``` ```
> output > output
``` ```
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
controller-0 us-west1-c e2-standard-2 10.240.0.10 XX.XX.XX.XXX RUNNING controller-0 us-east1-d e2-standard-2 10.240.0.10 XX.XXX.XX.XXX RUNNING
controller-1 us-west1-c e2-standard-2 10.240.0.11 XX.XXX.XXX.XX RUNNING controller-1 us-east1-d e2-standard-2 10.240.0.11 XX.XXX.XX.XXX RUNNING
controller-2 us-west1-c e2-standard-2 10.240.0.12 XX.XXX.XX.XXX RUNNING controller-2 us-east1-d e2-standard-2 10.240.0.12 XX.XXX.XX.XX RUNNING
worker-0 us-west1-c e2-standard-2 10.240.0.20 XX.XX.XXX.XXX RUNNING worker-0 us-east1-d e2-standard-2 10.240.0.20 XX.XX.XX.XXX RUNNING
worker-1 us-west1-c e2-standard-2 10.240.0.21 XX.XX.XX.XXX RUNNING worker-1 us-east1-d e2-standard-2 10.240.0.21 XX.XXX.XXX.XXX RUNNING
worker-2 us-west1-c e2-standard-2 10.240.0.22 XX.XXX.XX.XX RUNNING worker-2 us-east1-d e2-standard-2 10.240.0.22 XX.XXX.XXX.XX RUNNING
``` ```
## Configuring SSH Access ## Configuring SSH Access
SSH will be used to configure the controller and worker instances. When connecting to compute instances for the first time SSH keys will be generated for you and stored in the project or instance metadata as described in the [connecting to instances](https://cloud.google.com/compute/docs/instances/connecting-to-instance) documentation. SSH will be used to configure the controller and worker instances. When connecting to compute instances for the first time SSH keys will be generated for you and stored in the project or instance metadata as described in the [connecting to Linux VMs](https://cloud.google.com/compute/docs/connect/standard-ssh) documentation.
Test SSH access to the `controller-0` compute instances: Test SSH access to the `controller-0` compute instances:
@ -172,8 +171,8 @@ gcloud compute ssh controller-0
If this is your first time connecting to a compute instance SSH keys will be generated for you. Enter a passphrase at the prompt to continue: If this is your first time connecting to a compute instance SSH keys will be generated for you. Enter a passphrase at the prompt to continue:
``` ```
WARNING: The public SSH key file for gcloud does not exist.
WARNING: The private SSH key file for gcloud does not exist. WARNING: The private SSH key file for gcloud does not exist.
WARNING: The public SSH key file for gcloud does not exist.
WARNING: You do not have an SSH key for gcloud. WARNING: You do not have an SSH key for gcloud.
WARNING: SSH keygen will be executed to generate a key. WARNING: SSH keygen will be executed to generate a key.
Generating public/private rsa key pair. Generating public/private rsa key pair.
@ -184,23 +183,23 @@ Enter same passphrase again:
At this point the generated SSH keys will be uploaded and stored in your project: At this point the generated SSH keys will be uploaded and stored in your project:
``` ```
Your identification has been saved in /home/$USER/.ssh/google_compute_engine. Your identification has been saved in "/home/${USER}/.ssh/google_compute_engine"
Your public key has been saved in /home/$USER/.ssh/google_compute_engine.pub. Your public key has been saved in "/home/${USER}/.ssh/google_compute_engine.pub"
The key fingerprint is: The key fingerprint is:
SHA256:nz1i8jHmgQuGt+WscqP5SeIaSy5wyIJeL71MuV+QruE $USER@$HOSTNAME SHA256:OvopaMrkGOrbB0u2JMdwDvH6wGQBieKUC+XRAAm07RI "${USER}@${HOSTNAME}"
The key's randomart image is: The key's randomart image is:
+---[RSA 2048]----+ +---[RSA 3072]----+
| | |O*=o |
| | |**o.. |
| | |=E*. |
| . | | Boo |
|o. oS | |+.B. S |
|=... .o .o o | | =.O . |
|+.+ =+=.+.X o | |..O.+ o |
|.+ ==O*B.B = . | |*.++.o o |
| .+.=EB++ o | |=B..ooo |
+----[SHA256]-----+ +----[SHA256]-----+
Updating project ssh metadata...-Updated [https://www.googleapis.com/compute/v1/projects/$PROJECT_ID]. Updating project ssh metadata...Updated ["https://www.googleapis.com/compute/v1/projects/${PROJECT_ID}"].
Updating project ssh metadata...done. Updating project ssh metadata...done.
Waiting for SSH key to propagate. Waiting for SSH key to propagate.
``` ```
@ -208,20 +207,21 @@ Waiting for SSH key to propagate.
After the SSH keys have been updated you'll be logged into the `controller-0` instance: After the SSH keys have been updated you'll be logged into the `controller-0` instance:
``` ```
Welcome to Ubuntu 20.04.2 LTS (GNU/Linux 5.4.0-1042-gcp x86_64) Welcome to Ubuntu 22.04.2 LTS (GNU/Linux 5.19.0-1027-gcp x86_64)
... ...
``` ```
Type `exit` at the prompt to exit the `controller-0` compute instance: Type `exit` at the prompt to exit the `controller-0` compute instance:
``` ```
$USER@controller-0:~$ exit exit
``` ```
> output > output
``` ```
logout logout
Connection to XX.XX.XX.XXX closed Connection to XX.XXX.XX.XXX closed.
``` ```
Next: [Provisioning a CA and Generating TLS Certificates](04-certificate-authority.md) Next: [Provisioning a CA and Generating TLS Certificates](./04-certificate-authority.md)

View File

@ -1,17 +1,15 @@
# Provisioning a CA and Generating TLS Certificates # Provisioning a CA and Generating TLS Certificates
In this lab you will provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) using CloudFlare's PKI toolkit, [cfssl](https://github.com/cloudflare/cfssl), then use it to bootstrap a Certificate Authority, and generate TLS certificates for the following components: etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, and kube-proxy. In this lab you will provision a [public key infrastructure (PKI)](https://en.wikipedia.org/wiki/Public_key_infrastructure) using [CloudFlare's PKI/TLS toolkit](https://github.com/cloudflare/cfssl), then use it to bootstrap a Certificate Authority (CA), and generate TLS certificates for the following components: etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, and kube-proxy.
## Certificate Authority ## Certificate Authority
In this section you will provision a Certificate Authority that can be used to generate additional TLS certificates. In this section you will provision a CA that can be used to generate additional TLS certificates.
Generate the CA configuration file, certificate, and private key: Generate the CA configuration file, certificate, and private key:
``` ```
{ cat <<EOF >ca-config.json
cat > ca-config.json <<EOF
{ {
"signing": { "signing": {
"default": { "default": {
@ -27,7 +25,7 @@ cat > ca-config.json <<EOF
} }
EOF EOF
cat > ca-csr.json <<EOF cat <<EOF >ca-csr.json
{ {
"CN": "Kubernetes", "CN": "Kubernetes",
"key": { "key": {
@ -46,9 +44,8 @@ cat > ca-csr.json <<EOF
} }
EOF EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca cfssl gencert -initca ca-csr.json \
| cfssljson -bare ca
}
``` ```
Results: Results:
@ -67,9 +64,7 @@ In this section you will generate client and server certificates for each Kubern
Generate the `admin` client certificate and private key: Generate the `admin` client certificate and private key:
``` ```
{ cat <<EOF >admin-csr.json
cat > admin-csr.json <<EOF
{ {
"CN": "admin", "CN": "admin",
"key": { "key": {
@ -89,13 +84,12 @@ cat > admin-csr.json <<EOF
EOF EOF
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-profile=kubernetes \ -profile kubernetes \
admin-csr.json | cfssljson -bare admin admin-csr.json \
| cfssljson -bare admin
}
``` ```
Results: Results:
@ -107,13 +101,13 @@ admin.pem
### The Kubelet Client Certificates ### The Kubelet Client Certificates
Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/docs/admin/authorization/node/) called Node Authorizer that specifically authorizes API requests made by [Kubelets](https://kubernetes.io/docs/concepts/overview/components/#kubelet). In order to be authorized by the Node Authorizer, Kubelets must use a credential that identifies them as being in the `system:nodes` group, with a username of `system:node:<nodeName>`. In this section you will create a certificate for each Kubernetes worker node that meets the Node Authorizer requirements. Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/docs/reference/access-authn-authz/node/) called Node Authorizer that specifically authorizes API requests made by [Kubelets](https://kubernetes.io/docs/concepts/overview/components/#kubelet). In order to be authorized by the Node Authorizer, Kubelets must use a credential that identifies them as being in the `system:nodes` group, with a username of `system:node:<nodeName>`. In this section you will create a certificate for each Kubernetes worker node that meets the Node Authorizer requirements.
Generate a certificate and private key for each Kubernetes worker node: Generate a certificate and private key for each Kubernetes worker node:
``` ```
for instance in worker-0 worker-1 worker-2; do for instance in worker-0 worker-1 worker-2; do
cat > ${instance}-csr.json <<EOF cat <<EOF >"${instance}-csr.json"
{ {
"CN": "system:node:${instance}", "CN": "system:node:${instance}",
"key": { "key": {
@ -132,19 +126,20 @@ cat > ${instance}-csr.json <<EOF
} }
EOF EOF
EXTERNAL_IP=$(gcloud compute instances describe ${instance} \ EXTERNAL_IP="$(gcloud compute instances describe ${instance} \
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)') --format 'value(networkInterfaces[0].accessConfigs[0].natIP)')"
INTERNAL_IP=$(gcloud compute instances describe ${instance} \ INTERNAL_IP="$(gcloud compute instances describe ${instance} \
--format 'value(networkInterfaces[0].networkIP)') --format 'value(networkInterfaces[0].networkIP)')"
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-hostname=${instance},${EXTERNAL_IP},${INTERNAL_IP} \ -hostname "${instance},${EXTERNAL_IP},${INTERNAL_IP}" \
-profile=kubernetes \ -profile kubernetes \
${instance}-csr.json | cfssljson -bare ${instance} "${instance}-csr.json" \
| cfssljson -bare "${instance}"
done done
``` ```
@ -164,9 +159,7 @@ worker-2.pem
Generate the `kube-controller-manager` client certificate and private key: Generate the `kube-controller-manager` client certificate and private key:
``` ```
{ cat <<EOF >kube-controller-manager-csr.json
cat > kube-controller-manager-csr.json <<EOF
{ {
"CN": "system:kube-controller-manager", "CN": "system:kube-controller-manager",
"key": { "key": {
@ -186,13 +179,12 @@ cat > kube-controller-manager-csr.json <<EOF
EOF EOF
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-profile=kubernetes \ -profile kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager kube-controller-manager-csr.json \
| cfssljson -bare kube-controller-manager
}
``` ```
Results: Results:
@ -202,15 +194,12 @@ kube-controller-manager-key.pem
kube-controller-manager.pem kube-controller-manager.pem
``` ```
### The Kube Proxy Client Certificate ### The Kube Proxy Client Certificate
Generate the `kube-proxy` client certificate and private key: Generate the `kube-proxy` client certificate and private key:
``` ```
{ cat <<EOF >kube-proxy-csr.json
cat > kube-proxy-csr.json <<EOF
{ {
"CN": "system:kube-proxy", "CN": "system:kube-proxy",
"key": { "key": {
@ -230,13 +219,12 @@ cat > kube-proxy-csr.json <<EOF
EOF EOF
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-profile=kubernetes \ -profile kubernetes \
kube-proxy-csr.json | cfssljson -bare kube-proxy kube-proxy-csr.json \
| cfssljson -bare kube-proxy
}
``` ```
Results: Results:
@ -251,9 +239,7 @@ kube-proxy.pem
Generate the `kube-scheduler` client certificate and private key: Generate the `kube-scheduler` client certificate and private key:
``` ```
{ cat <<EOF >kube-scheduler-csr.json
cat > kube-scheduler-csr.json <<EOF
{ {
"CN": "system:kube-scheduler", "CN": "system:kube-scheduler",
"key": { "key": {
@ -273,13 +259,12 @@ cat > kube-scheduler-csr.json <<EOF
EOF EOF
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-profile=kubernetes \ -profile kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler kube-scheduler-csr.json \
| cfssljson -bare kube-scheduler
}
``` ```
Results: Results:
@ -289,7 +274,6 @@ kube-scheduler-key.pem
kube-scheduler.pem kube-scheduler.pem
``` ```
### The Kubernetes API Server Certificate ### The Kubernetes API Server Certificate
The `kubernetes-the-hard-way` static IP address will be included in the list of subject alternative names for the Kubernetes API Server certificate. This will ensure the certificate can be validated by remote clients. The `kubernetes-the-hard-way` static IP address will be included in the list of subject alternative names for the Kubernetes API Server certificate. This will ensure the certificate can be validated by remote clients.
@ -297,15 +281,12 @@ The `kubernetes-the-hard-way` static IP address will be included in the list of
Generate the Kubernetes API Server certificate and private key: Generate the Kubernetes API Server certificate and private key:
``` ```
{ KUBERNETES_PUBLIC_ADDRESS="$(gcloud compute addresses describe kubernetes-the-hard-way \
--format 'value(address)')"
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ KUBERNETES_HOSTNAMES='kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local'
--region $(gcloud config get-value compute/region) \
--format 'value(address)')
KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local cat <<EOF >kubernetes-csr.json
cat > kubernetes-csr.json <<EOF
{ {
"CN": "kubernetes", "CN": "kubernetes",
"key": { "key": {
@ -325,14 +306,13 @@ cat > kubernetes-csr.json <<EOF
EOF EOF
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-hostname=10.32.0.1,10.240.0.10,10.240.0.11,10.240.0.12,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,${KUBERNETES_HOSTNAMES} \ -hostname "10.32.0.1,10.240.0.10,10.240.0.11,10.240.0.12,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,${KUBERNETES_HOSTNAMES}" \
-profile=kubernetes \ -profile kubernetes \
kubernetes-csr.json | cfssljson -bare kubernetes kubernetes-csr.json \
| cfssljson -bare kubernetes
}
``` ```
> The Kubernetes API server is automatically assigned the `kubernetes` internal DNS name, which will be linked to the first IP address (`10.32.0.1`) from the address range (`10.32.0.0/24`) reserved for internal cluster services during the [control plane bootstrapping](08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server) lab. > The Kubernetes API server is automatically assigned the `kubernetes` internal DNS name, which will be linked to the first IP address (`10.32.0.1`) from the address range (`10.32.0.0/24`) reserved for internal cluster services during the [control plane bootstrapping](08-bootstrapping-kubernetes-controllers.md#configure-the-kubernetes-api-server) lab.
@ -346,14 +326,12 @@ kubernetes.pem
## The Service Account Key Pair ## The Service Account Key Pair
The Kubernetes Controller Manager leverages a key pair to generate and sign service account tokens as described in the [managing service accounts](https://kubernetes.io/docs/admin/service-accounts-admin/) documentation. The Kubernetes Controller Manager leverages a key pair to generate and sign service account tokens as described in the [managing service accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) documentation.
Generate the `service-account` certificate and private key: Generate the `service-account` certificate and private key:
``` ```
{ cat <<EOF >service-account-csr.json
cat > service-account-csr.json <<EOF
{ {
"CN": "service-accounts", "CN": "service-accounts",
"key": { "key": {
@ -373,13 +351,12 @@ cat > service-account-csr.json <<EOF
EOF EOF
cfssl gencert \ cfssl gencert \
-ca=ca.pem \ -ca ca.pem \
-ca-key=ca-key.pem \ -ca-key ca-key.pem \
-config=ca-config.json \ -config ca-config.json \
-profile=kubernetes \ -profile kubernetes \
service-account-csr.json | cfssljson -bare service-account service-account-csr.json \
| cfssljson -bare service-account
}
``` ```
Results: Results:
@ -389,14 +366,16 @@ service-account-key.pem
service-account.pem service-account.pem
``` ```
## Distribute the Client and Server Certificates ## Distribute the Client and Server Certificates
Copy the appropriate certificates and private keys to each worker instance: Copy the appropriate certificates and private keys to each worker instance:
``` ```
for instance in worker-0 worker-1 worker-2; do for instance in worker-0 worker-1 worker-2; do
gcloud compute scp ca.pem ${instance}-key.pem ${instance}.pem ${instance}:~/ gcloud compute scp \
ca.pem \
"${instance}-key.pem" "${instance}.pem" \
"${instance}:"
done done
``` ```
@ -404,11 +383,14 @@ Copy the appropriate certificates and private keys to each controller instance:
``` ```
for instance in controller-0 controller-1 controller-2; do for instance in controller-0 controller-1 controller-2; do
gcloud compute scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \ gcloud compute scp \
service-account-key.pem service-account.pem ${instance}:~/ ca-key.pem ca.pem \
kubernetes-key.pem kubernetes.pem \
service-account-key.pem service-account.pem \
"${instance}:"
done done
``` ```
> The `kube-proxy`, `kube-controller-manager`, `kube-scheduler`, and `kubelet` client certificates will be used to generate client authentication configuration files in the next lab. > The `kube-proxy`, `kube-controller-manager`, `kube-scheduler`, and `kubelet` client certificates will be used to generate client authentication configuration files in the next lab.
Next: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md) Next: [Generating Kubernetes Configuration Files for Authentication](./05-kubernetes-configuration-files.md)

View File

@ -13,39 +13,39 @@ Each kubeconfig requires a Kubernetes API Server to connect to. To support high
Retrieve the `kubernetes-the-hard-way` static IP address: Retrieve the `kubernetes-the-hard-way` static IP address:
``` ```
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ KUBERNETES_PUBLIC_ADDRESS="$(gcloud compute addresses describe kubernetes-the-hard-way \
--region $(gcloud config get-value compute/region) \ --format 'value(address)')"
--format 'value(address)')
``` ```
### The kubelet Kubernetes Configuration File ### The kubelet Kubernetes Configuration File
When generating kubeconfig files for Kubelets, the client certificate matching the Kubelet's node name must be used. This will ensure Kubelets are properly authorized by the Kubernetes [Node Authorizer](https://kubernetes.io/docs/admin/authorization/node/). When generating kubeconfig files for Kubelets, the client certificate matching the Kubelet's node name must be used. This will ensure Kubelets are properly authorized by the Kubernetes [Node Authorizer](https://kubernetes.io/docs/reference/access-authn-authz/node/).
> The following commands must be run in the same directory used to generate the SSL certificates during the [Generating TLS Certificates](04-certificate-authority.md) lab. > The following commands must be run in the same directory used to generate the SSL certificates during the [Generating TLS Certificates](./04-certificate-authority.md) lab.
Generate a kubeconfig file for each worker node: Generate a kubeconfig file for each worker node:
``` ```
for instance in worker-0 worker-1 worker-2; do for instance in worker-0 worker-1 worker-2; do
kubectl config set-cluster kubernetes-the-hard-way \ kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \ --certificate-authority ca.pem \
--embed-certs=true \ --embed-certs \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \ --kubeconfig "${instance}.kubeconfig" \
--kubeconfig=${instance}.kubeconfig --server "https://${KUBERNETES_PUBLIC_ADDRESS}:6443"
kubectl config set-credentials system:node:${instance} \ kubectl config set-credentials "system:node:${instance}" \
--client-certificate=${instance}.pem \ --client-certificate "${instance}.pem" \
--client-key=${instance}-key.pem \ --client-key "${instance}-key.pem" \
--embed-certs=true \ --embed-certs \
--kubeconfig=${instance}.kubeconfig --kubeconfig "${instance}.kubeconfig"
kubectl config set-context default \ kubectl config set-context default \
--cluster=kubernetes-the-hard-way \ --cluster "kubernetes-the-hard-way" \
--user=system:node:${instance} \ --kubeconfig "${instance}.kubeconfig" \
--kubeconfig=${instance}.kubeconfig --user "system:node:${instance}"
kubectl config use-context default --kubeconfig=${instance}.kubeconfig kubectl config use-context default \
--kubeconfig "${instance}.kubeconfig"
done done
``` ```
@ -62,26 +62,25 @@ worker-2.kubeconfig
Generate a kubeconfig file for the `kube-proxy` service: Generate a kubeconfig file for the `kube-proxy` service:
``` ```
{ kubectl config set-cluster kubernetes-the-hard-way \
kubectl config set-cluster kubernetes-the-hard-way \ --certificate-authority ca.pem \
--certificate-authority=ca.pem \ --embed-certs \
--embed-certs=true \ --kubeconfig kube-proxy.kubeconfig \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \ --server "https://${KUBERNETES_PUBLIC_ADDRESS}:6443"
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials system:kube-proxy \ kubectl config set-credentials system:kube-proxy \
--client-certificate=kube-proxy.pem \ --client-certificate kube-proxy.pem \
--client-key=kube-proxy-key.pem \ --client-key kube-proxy-key.pem \
--embed-certs=true \ --embed-certs \
--kubeconfig=kube-proxy.kubeconfig --kubeconfig kube-proxy.kubeconfig
kubectl config set-context default \ kubectl config set-context default \
--cluster=kubernetes-the-hard-way \ --cluster kubernetes-the-hard-way \
--user=system:kube-proxy \ --kubeconfig kube-proxy.kubeconfig \
--kubeconfig=kube-proxy.kubeconfig --user system:kube-proxy
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig kubectl config use-context default \
} --kubeconfig kube-proxy.kubeconfig
``` ```
Results: Results:
@ -95,26 +94,25 @@ kube-proxy.kubeconfig
Generate a kubeconfig file for the `kube-controller-manager` service: Generate a kubeconfig file for the `kube-controller-manager` service:
``` ```
{ kubectl config set-cluster kubernetes-the-hard-way \
kubectl config set-cluster kubernetes-the-hard-way \ --certificate-authority ca.pem \
--certificate-authority=ca.pem \ --embed-certs \
--embed-certs=true \ --kubeconfig kube-controller-manager.kubeconfig \
--server=https://127.0.0.1:6443 \ --server https://127.0.0.1:6443
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \ kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \ --client-certificate kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \ --client-key kube-controller-manager-key.pem \
--embed-certs=true \ --embed-certs \
--kubeconfig=kube-controller-manager.kubeconfig --kubeconfig kube-controller-manager.kubeconfig
kubectl config set-context default \ kubectl config set-context default \
--cluster=kubernetes-the-hard-way \ --cluster kubernetes-the-hard-way \
--user=system:kube-controller-manager \ --kubeconfig kube-controller-manager.kubeconfig \
--kubeconfig=kube-controller-manager.kubeconfig --user system:kube-controller-manager
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig kubectl config use-context default \
} --kubeconfig kube-controller-manager.kubeconfig
``` ```
Results: Results:
@ -129,26 +127,25 @@ kube-controller-manager.kubeconfig
Generate a kubeconfig file for the `kube-scheduler` service: Generate a kubeconfig file for the `kube-scheduler` service:
``` ```
{ kubectl config set-cluster kubernetes-the-hard-way \
kubectl config set-cluster kubernetes-the-hard-way \ --certificate-authority ca.pem \
--certificate-authority=ca.pem \ --embed-certs \
--embed-certs=true \ --kubeconfig kube-scheduler.kubeconfig \
--server=https://127.0.0.1:6443 \ --server https://127.0.0.1:6443
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \ kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \ --client-certificate kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \ --client-key kube-scheduler-key.pem \
--embed-certs=true \ --embed-certs \
--kubeconfig=kube-scheduler.kubeconfig --kubeconfig kube-scheduler.kubeconfig
kubectl config set-context default \ kubectl config set-context default \
--cluster=kubernetes-the-hard-way \ --cluster kubernetes-the-hard-way \
--user=system:kube-scheduler \ --kubeconfig kube-scheduler.kubeconfig \
--kubeconfig=kube-scheduler.kubeconfig --user system:kube-scheduler
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig kubectl config use-context default \
} --kubeconfig kube-scheduler.kubeconfig
``` ```
Results: Results:
@ -162,26 +159,25 @@ kube-scheduler.kubeconfig
Generate a kubeconfig file for the `admin` user: Generate a kubeconfig file for the `admin` user:
``` ```
{ kubectl config set-cluster kubernetes-the-hard-way \
kubectl config set-cluster kubernetes-the-hard-way \ --certificate-authority ca.pem \
--certificate-authority=ca.pem \ --embed-certs \
--embed-certs=true \ --kubeconfig admin.kubeconfig \
--server=https://127.0.0.1:6443 \ --server https://127.0.0.1:6443
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \ kubectl config set-credentials admin \
--client-certificate=admin.pem \ --client-certificate admin.pem \
--client-key=admin-key.pem \ --client-key admin-key.pem \
--embed-certs=true \ --embed-certs \
--kubeconfig=admin.kubeconfig --kubeconfig admin.kubeconfig
kubectl config set-context default \ kubectl config set-context default \
--cluster=kubernetes-the-hard-way \ --cluster kubernetes-the-hard-way \
--user=admin \ --kubeconfig admin.kubeconfig \
--kubeconfig=admin.kubeconfig --user admin
kubectl config use-context default --kubeconfig=admin.kubeconfig kubectl config use-context default \
} --kubeconfig admin.kubeconfig
``` ```
Results: Results:
@ -190,7 +186,6 @@ Results:
admin.kubeconfig admin.kubeconfig
``` ```
## ##
## Distribute the Kubernetes Configuration Files ## Distribute the Kubernetes Configuration Files
@ -199,7 +194,10 @@ Copy the appropriate `kubelet` and `kube-proxy` kubeconfig files to each worker
``` ```
for instance in worker-0 worker-1 worker-2; do for instance in worker-0 worker-1 worker-2; do
gcloud compute scp ${instance}.kubeconfig kube-proxy.kubeconfig ${instance}:~/ gcloud compute scp \
"${instance}.kubeconfig" \
kube-proxy.kubeconfig \
"${instance}:"
done done
``` ```
@ -207,8 +205,12 @@ Copy the appropriate `kube-controller-manager` and `kube-scheduler` kubeconfig f
``` ```
for instance in controller-0 controller-1 controller-2; do for instance in controller-0 controller-1 controller-2; do
gcloud compute scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/ gcloud compute scp \
admin.kubeconfig \
kube-controller-manager.kubeconfig \
kube-scheduler.kubeconfig \
"${instance}:"
done done
``` ```
Next: [Generating the Data Encryption Config and Key](06-data-encryption-keys.md) Next: [Generating the Data Encryption Config and Key](./06-data-encryption-keys.md)

View File

@ -1,6 +1,6 @@
# Generating the Data Encryption Config and Key # Generating the Data Encryption Config and Key
Kubernetes stores a variety of data including cluster state, application configurations, and secrets. Kubernetes supports the ability to [encrypt](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data) cluster data at rest. Kubernetes stores a variety of data including cluster state, application configurations, and secrets. Kubernetes supports the ability to [encrypt](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) cluster data at rest.
In this lab you will generate an encryption key and an [encryption config](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) suitable for encrypting Kubernetes Secrets. In this lab you will generate an encryption key and an [encryption config](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) suitable for encrypting Kubernetes Secrets.
@ -9,7 +9,7 @@ In this lab you will generate an encryption key and an [encryption config](https
Generate an encryption key: Generate an encryption key:
``` ```
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64) ENCRYPTION_KEY="$(head -c 32 /dev/urandom | base64)"
``` ```
## The Encryption Config File ## The Encryption Config File
@ -17,9 +17,9 @@ ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
Create the `encryption-config.yaml` encryption config file: Create the `encryption-config.yaml` encryption config file:
``` ```
cat > encryption-config.yaml <<EOF cat <<EOF >encryption-config.yaml
kind: EncryptionConfig
apiVersion: v1 apiVersion: v1
kind: EncryptionConfig
resources: resources:
- resources: - resources:
- secrets - secrets
@ -36,8 +36,8 @@ Copy the `encryption-config.yaml` encryption config file to each controller inst
``` ```
for instance in controller-0 controller-1 controller-2; do for instance in controller-0 controller-1 controller-2; do
gcloud compute scp encryption-config.yaml ${instance}:~/ gcloud compute scp encryption-config.yaml "${instance}:"
done done
``` ```
Next: [Bootstrapping the etcd Cluster](07-bootstrapping-etcd.md) Next: [Bootstrapping the etcd Cluster](./07-bootstrapping-etcd.md)

View File

@ -12,7 +12,7 @@ gcloud compute ssh controller-0
### Running commands in parallel with tmux ### Running commands in parallel with tmux
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab. [tmux](https://tmux.github.io/) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](./01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
## Bootstrapping an etcd Cluster Member ## Bootstrapping an etcd Cluster Member
@ -21,60 +21,61 @@ gcloud compute ssh controller-0
Download the official etcd release binaries from the [etcd](https://github.com/etcd-io/etcd) GitHub project: Download the official etcd release binaries from the [etcd](https://github.com/etcd-io/etcd) GitHub project:
``` ```
wget -q --show-progress --https-only --timestamping \ curl --location --remote-name --time-cond etcd-v3.5.9-linux-amd64.tar.gz \
"https://github.com/etcd-io/etcd/releases/download/v3.4.15/etcd-v3.4.15-linux-amd64.tar.gz" https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz
``` ```
Extract and install the `etcd` server and the `etcdctl` command line utility: Extract and install the `etcd` server and the `etcdctl` command line utility:
``` ```
{ tar --extract --file etcd-v3.5.9-linux-amd64.tar.gz --verbose
tar -xvf etcd-v3.4.15-linux-amd64.tar.gz
sudo mv etcd-v3.4.15-linux-amd64/etcd* /usr/local/bin/ sudo cp etcd-v3.5.9-linux-amd64/etcd* /usr/local/bin/
}
``` ```
### Configure the etcd Server ### Configure the etcd Server
``` ```
{ sudo mkdir --parents /etc/etcd /var/lib/etcd
sudo mkdir -p /etc/etcd /var/lib/etcd
sudo chmod 700 /var/lib/etcd sudo chmod 0700 /etc/etcd/ /var/lib/etcd/
sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
} sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
``` ```
The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address for the current compute instance: The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address for the current compute instance:
``` ```
INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \ INTERNAL_IP="$(curl --silent --header 'Metadata-Flavor: Google' \
http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip) http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)"
``` ```
Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance: Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance:
``` ```
ETCD_NAME=$(hostname -s) ETCD_NAME="$(hostname --short)"
``` ```
Create the `etcd.service` systemd unit file: Create the `etcd.service` systemd unit file:
``` ```
cat <<EOF | sudo tee /etc/systemd/system/etcd.service sudo mkdir --parents /usr/local/lib/systemd/system
cat <<EOF | sudo tee /usr/local/lib/systemd/system/etcd.service
[Unit] [Unit]
Description=etcd Description=etcd
Documentation=https://github.com/coreos Documentation=https://github.com/etcd-io/etcd
[Service] [Service]
Type=notify Type=notify
ExecStart=/usr/local/bin/etcd \\ ExecStart=/usr/local/bin/etcd \\
--name ${ETCD_NAME} \\ --name ${ETCD_NAME} \\
--cert-file=/etc/etcd/kubernetes.pem \\ --cert-file /etc/etcd/kubernetes.pem \\
--key-file=/etc/etcd/kubernetes-key.pem \\ --key-file /etc/etcd/kubernetes-key.pem \\
--peer-cert-file=/etc/etcd/kubernetes.pem \\ --peer-cert-file /etc/etcd/kubernetes.pem \\
--peer-key-file=/etc/etcd/kubernetes-key.pem \\ --peer-key-file /etc/etcd/kubernetes-key.pem \\
--trusted-ca-file=/etc/etcd/ca.pem \\ --trusted-ca-file /etc/etcd/ca.pem \\
--peer-trusted-ca-file=/etc/etcd/ca.pem \\ --peer-trusted-ca-file /etc/etcd/ca.pem \\
--peer-client-cert-auth \\ --peer-client-cert-auth \\
--client-cert-auth \\ --client-cert-auth \\
--initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\ --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
@ -84,7 +85,7 @@ ExecStart=/usr/local/bin/etcd \\
--initial-cluster-token etcd-cluster-0 \\ --initial-cluster-token etcd-cluster-0 \\
--initial-cluster controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380,controller-2=https://10.240.0.12:2380 \\ --initial-cluster controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380,controller-2=https://10.240.0.12:2380 \\
--initial-cluster-state new \\ --initial-cluster-state new \\
--data-dir=/var/lib/etcd --data-dir /var/lib/etcd
Restart=on-failure Restart=on-failure
RestartSec=5 RestartSec=5
@ -96,11 +97,7 @@ EOF
### Start the etcd Server ### Start the etcd Server
``` ```
{ sudo systemctl enable --now etcd
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
}
``` ```
> Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`. > Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`.
@ -111,10 +108,10 @@ List the etcd cluster members:
``` ```
sudo ETCDCTL_API=3 etcdctl member list \ sudo ETCDCTL_API=3 etcdctl member list \
--endpoints=https://127.0.0.1:2379 \ --cacert /etc/etcd/ca.pem \
--cacert=/etc/etcd/ca.pem \ --cert /etc/etcd/kubernetes.pem \
--cert=/etc/etcd/kubernetes.pem \ --endpoints https://127.0.0.1:2379 \
--key=/etc/etcd/kubernetes-key.pem --key /etc/etcd/kubernetes-key.pem
``` ```
> output > output
@ -125,4 +122,4 @@ f98dc20bce6225a0, started, controller-0, https://10.240.0.10:2380, https://10.24
ffed16798470cab5, started, controller-1, https://10.240.0.11:2380, https://10.240.0.11:2379, false ffed16798470cab5, started, controller-1, https://10.240.0.11:2380, https://10.240.0.11:2379, false
``` ```
Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md) Next: [Bootstrapping the Kubernetes Control Plane](./08-bootstrapping-kubernetes-controllers.md)

View File

@ -12,14 +12,14 @@ gcloud compute ssh controller-0
### Running commands in parallel with tmux ### Running commands in parallel with tmux
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab. [tmux](https://tmux.github.io/) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](./01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
## Provision the Kubernetes Control Plane ## Provision the Kubernetes Control Plane
Create the Kubernetes configuration directory: Create the Kubernetes configuration directory:
``` ```
sudo mkdir -p /etc/kubernetes/config sudo mkdir --parents /etc/kubernetes/config
``` ```
### Download and Install the Kubernetes Controller Binaries ### Download and Install the Kubernetes Controller Binaries
@ -27,91 +27,91 @@ sudo mkdir -p /etc/kubernetes/config
Download the official Kubernetes release binaries: Download the official Kubernetes release binaries:
``` ```
wget -q --show-progress --https-only --timestamping \ curl --location \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-apiserver" \ --remote-name --time-cond kube-apiserver \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-controller-manager" \ https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kube-apiserver \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-scheduler" \ --remote-name --time-cond kube-controller-manager \
"https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl" https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kube-controller-manager \
--remote-name --time-cond kube-scheduler \
https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kube-scheduler \
--remote-name --time-cond kubectl \
https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubectl
``` ```
Install the Kubernetes binaries: Install the Kubernetes binaries:
``` ```
{ sudo install --mode 0755 kube-apiserver kube-controller-manager \
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl kube-scheduler kubectl /usr/local/bin/
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
}
``` ```
### Configure the Kubernetes API Server ### Configure the Kubernetes API Server
``` ```
{ sudo mkdir --parents /var/lib/kubernetes
sudo mkdir -p /var/lib/kubernetes/
sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \ sudo cp \
ca-key.pem ca.pem \
kubernetes-key.pem kubernetes.pem \
service-account-key.pem service-account.pem \ service-account-key.pem service-account.pem \
encryption-config.yaml /var/lib/kubernetes/ encryption-config.yaml \
} /var/lib/kubernetes/
``` ```
The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance: The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance:
``` ```
INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \ INTERNAL_IP="$(curl --silent --header 'Metadata-Flavor: Google' \
http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip) http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)"
```
``` REGION="$(curl --silent --header 'Metadata-Flavor: Google' \
REGION=$(curl -s -H "Metadata-Flavor: Google" \ http://metadata.google.internal/computeMetadata/v1/project/attributes/google-compute-default-region)"
http://metadata.google.internal/computeMetadata/v1/project/attributes/google-compute-default-region)
```
``` KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe \
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ kubernetes-the-hard-way --region "${REGION}" --format 'value(address)')
--region $REGION \
--format 'value(address)')
``` ```
Create the `kube-apiserver.service` systemd unit file: Create the `kube-apiserver.service` systemd unit file:
``` ```
cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service sudo mkdir --parents /usr/local/lib/systemd/system
cat <<EOF | sudo tee /usr/local/lib/systemd/system/kube-apiserver.service
[Unit] [Unit]
Description=Kubernetes API Server Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes Documentation=https://github.com/kubernetes/kubernetes
[Service] [Service]
ExecStart=/usr/local/bin/kube-apiserver \\ ExecStart=/usr/local/bin/kube-apiserver \\
--advertise-address=${INTERNAL_IP} \\ --advertise-address ${INTERNAL_IP} \\
--allow-privileged=true \\ --allow-privileged \\
--apiserver-count=3 \\ --apiserver-count 3 \\
--audit-log-maxage=30 \\ --audit-log-maxage 30 \\
--audit-log-maxbackup=3 \\ --audit-log-maxbackup 3 \\
--audit-log-maxsize=100 \\ --audit-log-maxsize 100 \\
--audit-log-path=/var/log/audit.log \\ --audit-log-path /var/log/audit.log \\
--authorization-mode=Node,RBAC \\ --authorization-mode Node,RBAC \\
--bind-address=0.0.0.0 \\ --bind-address 0.0.0.0 \\
--client-ca-file=/var/lib/kubernetes/ca.pem \\ --client-ca-file /var/lib/kubernetes/ca.pem \\
--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\ --enable-admission-plugins NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
--etcd-cafile=/var/lib/kubernetes/ca.pem \\ --etcd-cafile /var/lib/kubernetes/ca.pem \\
--etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\ --etcd-certfile /var/lib/kubernetes/kubernetes.pem \\
--etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\ --etcd-keyfile /var/lib/kubernetes/kubernetes-key.pem \\
--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379,https://10.240.0.12:2379 \\ --etcd-servers https://10.240.0.10:2379,https://10.240.0.11:2379,https://10.240.0.12:2379 \\
--event-ttl=1h \\ --event-ttl 1h \\
--encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\ --encryption-provider-config /var/lib/kubernetes/encryption-config.yaml \\
--kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\ --kubelet-certificate-authority /var/lib/kubernetes/ca.pem \\
--kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\ --kubelet-client-certificate /var/lib/kubernetes/kubernetes.pem \\
--kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\ --kubelet-client-key /var/lib/kubernetes/kubernetes-key.pem \\
--runtime-config='api/all=true' \\ --runtime-config 'api/all=true' \\
--service-account-key-file=/var/lib/kubernetes/service-account.pem \\ --service-account-key-file /var/lib/kubernetes/service-account.pem \\
--service-account-signing-key-file=/var/lib/kubernetes/service-account-key.pem \\ --service-account-signing-key-file /var/lib/kubernetes/service-account-key.pem \\
--service-account-issuer=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \\ --service-account-issuer https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \\
--service-cluster-ip-range=10.32.0.0/24 \\ --service-cluster-ip-range 10.32.0.0/24 \\
--service-node-port-range=30000-32767 \\ --service-node-port-range 30000-32767 \\
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\ --tls-cert-file /var/lib/kubernetes/kubernetes.pem \\
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\ --tls-private-key-file /var/lib/kubernetes/kubernetes-key.pem \\
--v=2 --v 2
Restart=on-failure Restart=on-failure
RestartSec=5 RestartSec=5
@ -122,34 +122,34 @@ EOF
### Configure the Kubernetes Controller Manager ### Configure the Kubernetes Controller Manager
Move the `kube-controller-manager` kubeconfig into place: Copy the `kube-controller-manager` kubeconfig into place:
``` ```
sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/ sudo cp kube-controller-manager.kubeconfig /var/lib/kubernetes/
``` ```
Create the `kube-controller-manager.service` systemd unit file: Create the `kube-controller-manager.service` systemd unit file:
``` ```
cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service cat <<EOF | sudo tee /usr/local/lib/systemd/system/kube-controller-manager.service
[Unit] [Unit]
Description=Kubernetes Controller Manager Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes Documentation=https://github.com/kubernetes/kubernetes
[Service] [Service]
ExecStart=/usr/local/bin/kube-controller-manager \\ ExecStart=/usr/local/bin/kube-controller-manager \\
--bind-address=0.0.0.0 \\ --bind-address 0.0.0.0 \\
--cluster-cidr=10.200.0.0/16 \\ --cluster-cidr 10.200.0.0/16 \\
--cluster-name=kubernetes \\ --cluster-name kubernetes \\
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\ --cluster-signing-cert-file /var/lib/kubernetes/ca.pem \\
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\ --cluster-signing-key-file /var/lib/kubernetes/ca-key.pem \\
--kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\ --kubeconfig /var/lib/kubernetes/kube-controller-manager.kubeconfig \\
--leader-elect=true \\ --leader-elect \\
--root-ca-file=/var/lib/kubernetes/ca.pem \\ --root-ca-file /var/lib/kubernetes/ca.pem \\
--service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\ --service-account-private-key-file /var/lib/kubernetes/service-account-key.pem \\
--service-cluster-ip-range=10.32.0.0/24 \\ --service-cluster-ip-range 10.32.0.0/24 \\
--use-service-account-credentials=true \\ --use-service-account-credentials \\
--v=2 --v 2
Restart=on-failure Restart=on-failure
RestartSec=5 RestartSec=5
@ -160,20 +160,20 @@ EOF
### Configure the Kubernetes Scheduler ### Configure the Kubernetes Scheduler
Move the `kube-scheduler` kubeconfig into place: Copy the `kube-scheduler` kubeconfig into place:
``` ```
sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/ sudo cp kube-scheduler.kubeconfig /var/lib/kubernetes/
``` ```
Create the `kube-scheduler.yaml` configuration file: Create the `kube-scheduler.yaml` configuration file:
``` ```
cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1 apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration kind: KubeSchedulerConfiguration
clientConnection: clientConnection:
kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" kubeconfig: /var/lib/kubernetes/kube-scheduler.kubeconfig
leaderElection: leaderElection:
leaderElect: true leaderElect: true
EOF EOF
@ -182,15 +182,15 @@ EOF
Create the `kube-scheduler.service` systemd unit file: Create the `kube-scheduler.service` systemd unit file:
``` ```
cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service cat <<EOF | sudo tee /usr/local/lib/systemd/system/kube-scheduler.service
[Unit] [Unit]
Description=Kubernetes Scheduler Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes Documentation=https://github.com/kubernetes/kubernetes
[Service] [Service]
ExecStart=/usr/local/bin/kube-scheduler \\ ExecStart=/usr/local/bin/kube-scheduler \\
--config=/etc/kubernetes/config/kube-scheduler.yaml \\ --config /etc/kubernetes/config/kube-scheduler.yaml \\
--v=2 --v 2
Restart=on-failure Restart=on-failure
RestartSec=5 RestartSec=5
@ -202,30 +202,23 @@ EOF
### Start the Controller Services ### Start the Controller Services
``` ```
{ sudo systemctl enable --now kube-apiserver kube-controller-manager kube-scheduler
sudo systemctl daemon-reload
sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler
sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
}
``` ```
> Allow up to 10 seconds for the Kubernetes API Server to fully initialize. > Allow up to 10 seconds for the Kubernetes API Server to fully initialize.
### Enable HTTP Health Checks ### Enable HTTP Health Checks
A [Google Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network) will be used to distribute traffic across the three API servers and allow each API server to terminate TLS connections and validate client certificates. The network load balancer only supports HTTP health checks which means the HTTPS endpoint exposed by the API server cannot be used. As a workaround the nginx webserver can be used to proxy HTTP health checks. In this section nginx will be installed and configured to accept HTTP health checks on port `80` and proxy the connections to the API server on `https://127.0.0.1:6443/healthz`. A [Google Network Load Balancer](https://cloud.google.com/load-balancing/docs/network) will be used to distribute traffic across the three API servers and allow each API server to terminate TLS connections and validate client certificates. The network load balancer only supports HTTP health checks which means the HTTPS endpoint exposed by the API server cannot be used. As a workaround the nginx webserver can be used to proxy HTTP health checks. In this section nginx will be installed and configured to accept HTTP health checks on port `80` and proxy the connections to the API server on `https://127.0.0.1:6443/healthz`.
> The `/healthz` API server endpoint does not require authentication by default. > The `/healthz` API server endpoint does not require authentication by default.
Install a basic web server to handle HTTP health checks: Install a basic web server to handle HTTP health checks:
``` ```
sudo apt-get update sudo apt-get install --yes nginx
sudo apt-get install -y nginx
```
``` cat <<EOF | sudo tee /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
cat > kubernetes.default.svc.cluster.local <<EOF
server { server {
listen 80; listen 80;
server_name kubernetes.default.svc.cluster.local; server_name kubernetes.default.svc.cluster.local;
@ -236,31 +229,22 @@ server {
} }
} }
EOF EOF
```
``` sudo ln --symbolic \
{ /etc/nginx/sites-available/kubernetes.default.svc.cluster.local \
sudo mv kubernetes.default.svc.cluster.local \ /etc/nginx/sites-enabled/
/etc/nginx/sites-available/kubernetes.default.svc.cluster.local
sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/
}
```
```
sudo systemctl restart nginx sudo systemctl restart nginx
``` ```
```
sudo systemctl enable nginx
```
### Verification ### Verification
``` ```
kubectl cluster-info --kubeconfig admin.kubeconfig kubectl cluster-info --kubeconfig admin.kubeconfig
``` ```
> output
``` ```
Kubernetes control plane is running at https://127.0.0.1:6443 Kubernetes control plane is running at https://127.0.0.1:6443
``` ```
@ -268,20 +252,24 @@ Kubernetes control plane is running at https://127.0.0.1:6443
Test the nginx HTTP health check proxy: Test the nginx HTTP health check proxy:
``` ```
curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz curl --header 'Host: kubernetes.default.svc.cluster.local' --include \
http://127.0.0.1/healthz
``` ```
> output
``` ```
HTTP/1.1 200 OK HTTP/1.1 200 OK
Server: nginx/1.18.0 (Ubuntu) Server: nginx/1.18.0 (Ubuntu)
Date: Sun, 02 May 2021 04:19:29 GMT Date: Wed, 26 Jul 2023 13:35:08 GMT
Content-Type: text/plain; charset=utf-8 Content-Type: text/plain; charset=utf-8
Content-Length: 2 Content-Length: 2
Connection: keep-alive Connection: keep-alive
Audit-Id: d87ab78c-776b-42f9-950c-42c7b6060e7f
Cache-Control: no-cache, private Cache-Control: no-cache, private
X-Content-Type-Options: nosniff X-Content-Type-Options: nosniff
X-Kubernetes-Pf-Flowschema-Uid: c43f32eb-e038-457f-9474-571d43e5c325 X-Kubernetes-Pf-Flowschema-Uid: bb5f446a-26d9-4f6e-a18f-d40546253482
X-Kubernetes-Pf-Prioritylevel-Uid: 8ba5908f-5569-4330-80fd-c643e7512366 X-Kubernetes-Pf-Prioritylevel-Uid: 34a0ffbd-2fd0-44b8-b7ab-d9c883cabb34
ok ok
``` ```
@ -292,7 +280,7 @@ ok
In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node. Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods. In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node. Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods.
> This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization. > This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#checking-api-access) API to determine authorization.
The commands in this section will affect the entire cluster and only need to be run once from one of the controller nodes. The commands in this section will affect the entire cluster and only need to be run once from one of the controller nodes.
@ -300,10 +288,10 @@ The commands in this section will effect the entire cluster and only need to be
gcloud compute ssh controller-0 gcloud compute ssh controller-0
``` ```
Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods: Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods:
``` ```
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f - cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig --filename -
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
@ -331,7 +319,7 @@ The Kubernetes API Server authenticates to the Kubelet as the `kubernetes` user
Bind the `system:kube-apiserver-to-kubelet` ClusterRole to the `kubernetes` user: Bind the `system:kube-apiserver-to-kubelet` ClusterRole to the `kubernetes` user:
``` ```
cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f - cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig --filename -
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
@ -354,39 +342,34 @@ In this section you will provision an external load balancer to front the Kubern
> The compute instances created in this tutorial will not have permission to complete this section. **Run the following commands from the same machine used to create the compute instances**. > The compute instances created in this tutorial will not have permission to complete this section. **Run the following commands from the same machine used to create the compute instances**.
### Provision a Network Load Balancer ### Provision a Network Load Balancer
Create the external load balancer network resources: Create the external load balancer network resources:
``` ```
{ KUBERNETES_PUBLIC_ADDRESS="$(gcloud compute addresses describe kubernetes-the-hard-way \
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ --format 'value(address)')"
--region $(gcloud config get-value compute/region) \
--format 'value(address)')
gcloud compute http-health-checks create kubernetes \ gcloud compute http-health-checks create kubernetes \
--description "Kubernetes Health Check" \ --description 'Kubernetes Health Check' \
--host "kubernetes.default.svc.cluster.local" \ --host kubernetes.default.svc.cluster.local \
--request-path "/healthz" --request-path /healthz
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-health-check \ gcloud compute firewall-rules create kubernetes-the-hard-way-allow-health-check \
--allow tcp \
--network kubernetes-the-hard-way \ --network kubernetes-the-hard-way \
--source-ranges 209.85.152.0/22,209.85.204.0/22,35.191.0.0/16 \ --source-ranges 209.85.152.0/22,209.85.204.0/22,35.191.0.0/16
--allow tcp
gcloud compute target-pools create kubernetes-target-pool \ gcloud compute target-pools create kubernetes-target-pool \
--http-health-check kubernetes --http-health-check kubernetes
gcloud compute target-pools add-instances kubernetes-target-pool \ gcloud compute target-pools add-instances kubernetes-target-pool \
--instances controller-0,controller-1,controller-2 --instances controller-0,controller-1,controller-2
gcloud compute forwarding-rules create kubernetes-forwarding-rule \ gcloud compute forwarding-rules create kubernetes-forwarding-rule \
--address ${KUBERNETES_PUBLIC_ADDRESS} \ --address "${KUBERNETES_PUBLIC_ADDRESS}" \
--ports 6443 \ --ports 6443 \
--region $(gcloud config get-value compute/region) \
--target-pool kubernetes-target-pool --target-pool kubernetes-target-pool
}
``` ```
### Verification ### Verification
@ -396,15 +379,14 @@ Create the external load balancer network resources:
Retrieve the `kubernetes-the-hard-way` static IP address: Retrieve the `kubernetes-the-hard-way` static IP address:
``` ```
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ KUBERNETES_PUBLIC_ADDRESS="$(gcloud compute addresses describe kubernetes-the-hard-way \
--region $(gcloud config get-value compute/region) \ --format 'value(address)')"
--format 'value(address)')
``` ```
Make a HTTP request for the Kubernetes version info: Make a HTTP request for the Kubernetes version info:
``` ```
curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version curl --cacert ca.pem "https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version"
``` ```
> output > output
@ -412,15 +394,15 @@ curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version
``` ```
{ {
"major": "1", "major": "1",
"minor": "21", "minor": "27",
"gitVersion": "v1.21.0", "gitVersion": "v1.27.4",
"gitCommit": "cb303e613a121a29364f75cc67d3d580833a7479", "gitCommit": "fa3d7990104d7c1f16943a67f11b154b71f6a132",
"gitTreeState": "clean", "gitTreeState": "clean",
"buildDate": "2021-04-08T16:25:06Z", "buildDate": "2023-07-19T12:14:49Z",
"goVersion": "go1.16.1", "goVersion": "go1.20.6",
"compiler": "gc", "compiler": "gc",
"platform": "linux/amd64" "platform": "linux/amd64"
} }
``` ```
Next: [Bootstrapping the Kubernetes Worker Nodes](09-bootstrapping-kubernetes-workers.md) Next: [Bootstrapping the Kubernetes Worker Nodes](./09-bootstrapping-kubernetes-workers.md)

View File

@ -1,6 +1,6 @@
# Bootstrapping the Kubernetes Worker Nodes # Bootstrapping the Kubernetes Worker Nodes
In this lab you will bootstrap three Kubernetes worker nodes. The following components will be installed on each node: [runc](https://github.com/opencontainers/runc), [container networking plugins](https://github.com/containernetworking/cni), [containerd](https://github.com/containerd/containerd), [kubelet](https://kubernetes.io/docs/admin/kubelet), and [kube-proxy](https://kubernetes.io/docs/concepts/cluster-administration/proxies). In this lab you will bootstrap three Kubernetes worker nodes. The following components will be installed on each node: [containerd](https://github.com/containerd/containerd), [runc](https://github.com/opencontainers/runc), [container networking plugins](https://github.com/containernetworking/plugins), [crictl](https://github.com/kubernetes-sigs/cri-tools), [kube-proxy](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/), [kubectl](https://kubernetes.io/docs/reference/kubectl/), and [kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/).
## Prerequisites ## Prerequisites
@ -12,17 +12,16 @@ gcloud compute ssh worker-0
### Running commands in parallel with tmux ### Running commands in parallel with tmux
[tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab. [tmux](https://tmux.github.io/) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](./01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab.
## Provisioning a Kubernetes Worker Node ## Provisioning a Kubernetes Worker Node
Install the OS dependencies: Install the OS dependencies ([conntrack](https://conntrack-tools.netfilter.org/), [ipset](https://ipset.netfilter.org/), and [socat](http://www.dest-unreach.org/socat/)):
``` ```
{ sudo apt-get update
sudo apt-get update
sudo apt-get -y install socat conntrack ipset sudo apt-get --yes install conntrack ipset socat
}
``` ```
> The socat binary enables support for the `kubectl port-forward` command. > The socat binary enables support for the `kubectl port-forward` command.
@ -37,10 +36,10 @@ Verify if swap is enabled:
sudo swapon --show sudo swapon --show
``` ```
If output is empty then swap is not enabled. If swap is enabled run the following command to disable swap immediately: If output is empty then swap is not enabled. If swap is enabled run the following command to disable swap immediately:
``` ```
sudo swapoff -a sudo swapoff --all
``` ```
> To ensure swap remains off after reboot consult your Linux distro documentation. > To ensure swap remains off after reboot consult your Linux distro documentation.
@ -48,24 +47,33 @@ sudo swapoff -a
### Download and Install Worker Binaries ### Download and Install Worker Binaries
``` ```
wget -q --show-progress --https-only --timestamping \ curl --location \
https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.21.0/crictl-v1.21.0-linux-amd64.tar.gz \ --remote-name --time-cond containerd-1.7.3-linux-amd64.tar.gz \
https://github.com/opencontainers/runc/releases/download/v1.0.0-rc93/runc.amd64 \ https://github.com/containerd/containerd/releases/download/v1.7.3/containerd-1.7.3-linux-amd64.tar.gz \
https://github.com/containernetworking/plugins/releases/download/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz \ --remote-name --time-cond containerd.service \
https://github.com/containerd/containerd/releases/download/v1.4.4/containerd-1.4.4-linux-amd64.tar.gz \ https://raw.githubusercontent.com/containerd/containerd/v1.7.3/containerd.service \
https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl \ --output runc --time-cond runc \
https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kube-proxy \ https://github.com/opencontainers/runc/releases/download/v1.1.8/runc.amd64 \
https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet --remote-name --time-cond cni-plugins-linux-amd64-v1.3.0.tgz \
https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz \
--remote-name --time-cond crictl-v1.27.1-linux-amd64.tar.gz \
https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.27.1/crictl-v1.27.1-linux-amd64.tar.gz \
--remote-name --time-cond kube-proxy \
https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kube-proxy \
--remote-name --time-cond kubectl \
https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubectl \
--remote-name --time-cond kubelet \
https://dl.k8s.io/release/v1.27.4/bin/linux/amd64/kubelet
``` ```
Create the installation directories: Create the installation directories:
``` ```
sudo mkdir -p \ sudo mkdir --parents \
/etc/cni/net.d \ /etc/cni/net.d \
/opt/cni/bin \ /opt/cni/bin \
/var/lib/kubelet \
/var/lib/kube-proxy \ /var/lib/kube-proxy \
/var/lib/kubelet \
/var/lib/kubernetes \ /var/lib/kubernetes \
/var/run/kubernetes /var/run/kubernetes
``` ```
@ -73,16 +81,21 @@ sudo mkdir -p \
Install the worker binaries: Install the worker binaries:
``` ```
{ sudo tar --directory /usr/local/ --extract \
mkdir containerd --file containerd-1.7.3-linux-amd64.tar.gz --gunzip --verbose
tar -xvf crictl-v1.21.0-linux-amd64.tar.gz
tar -xvf containerd-1.4.4-linux-amd64.tar.gz -C containerd sudo mkdir --parents /usr/local/lib/systemd/system
sudo tar -xvf cni-plugins-linux-amd64-v0.9.1.tgz -C /opt/cni/bin/
sudo mv runc.amd64 runc sudo cp containerd.service /usr/local/lib/systemd/system/
chmod +x crictl kubectl kube-proxy kubelet runc
sudo mv crictl kubectl kube-proxy kubelet runc /usr/local/bin/ sudo install --mode 0755 runc /usr/local/sbin/
sudo mv containerd/bin/* /bin/
} tar --extract --file crictl-v1.27.1-linux-amd64.tar.gz --gunzip --verbose
sudo tar --directory /opt/cni/bin/ --extract \
--file cni-plugins-linux-amd64-v1.3.0.tgz --gunzip --verbose
sudo install --mode 0755 crictl kube-proxy kubectl kubelet /usr/local/bin/
``` ```
### Configure CNI Networking ### Configure CNI Networking
@ -90,40 +103,42 @@ Install the worker binaries:
Retrieve the Pod CIDR range for the current compute instance: Retrieve the Pod CIDR range for the current compute instance:
``` ```
POD_CIDR=$(curl -s -H "Metadata-Flavor: Google" \ POD_CIDR="$(curl --silent --header 'Metadata-Flavor: Google' \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/pod-cidr) http://metadata.google.internal/computeMetadata/v1/instance/attributes/pod-cidr)"
``` ```
Create the `bridge` network configuration file: Create the CNI config file:
``` ```
cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf cat << EOF | sudo tee /etc/cni/net.d/10-containerd-net.conflist
{ {
"cniVersion": "0.4.0", "cniVersion": "1.0.0",
"name": "bridge", "name": "containerd-net",
"plugins": [
{
"type": "bridge", "type": "bridge",
"bridge": "cnio0", "bridge": "cni0",
"isGateway": true, "isGateway": true,
"ipMasq": true, "ipMasq": true,
"promiscMode": true,
"ipam": { "ipam": {
"type": "host-local", "type": "host-local",
"ranges": [ "ranges": [
[{"subnet": "${POD_CIDR}"}] [{
"subnet": "${POD_CIDR}"
}]
], ],
"routes": [{"dst": "0.0.0.0/0"}] "routes": [
{ "dst": "0.0.0.0/0" }
]
} }
} },
EOF {
``` "type": "portmap",
"capabilities": {"portMappings": true},
Create the `loopback` network configuration file: "externalSetMarkChain": "KUBE-MARK-MASQ"
}
``` ]
cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
{
"cniVersion": "0.4.0",
"name": "lo",
"type": "loopback"
} }
EOF EOF
``` ```
@ -133,55 +148,19 @@ EOF
Create the `containerd` configuration file: Create the `containerd` configuration file:
``` ```
sudo mkdir -p /etc/containerd/ sudo mkdir --parents /etc/containerd
```
``` containerd config default | sudo tee /etc/containerd/config.toml
cat << EOF | sudo tee /etc/containerd/config.toml
[plugins]
[plugins.cri.containerd]
snapshotter = "overlayfs"
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = "/usr/local/bin/runc"
runtime_root = ""
EOF
```
Create the `containerd.service` systemd unit file:
```
cat <<EOF | sudo tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
``` ```
### Configure the Kubelet ### Configure the Kubelet
``` ```
{ sudo cp "${HOSTNAME}-key.pem" "${HOSTNAME}.pem" /var/lib/kubelet/
sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig sudo cp "${HOSTNAME}.kubeconfig" /var/lib/kubelet/kubeconfig
sudo mv ca.pem /var/lib/kubernetes/
} sudo cp ca.pem /var/lib/kubernetes/
``` ```
Create the `kubelet-config.yaml` configuration file: Create the `kubelet-config.yaml` configuration file:
@ -215,7 +194,7 @@ EOF
Create the `kubelet.service` systemd unit file: Create the `kubelet.service` systemd unit file:
``` ```
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service cat <<EOF | sudo tee /usr/local/lib/systemd/system/kubelet.service
[Unit] [Unit]
Description=Kubernetes Kubelet Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes Documentation=https://github.com/kubernetes/kubernetes
@ -224,14 +203,11 @@ Requires=containerd.service
[Service] [Service]
ExecStart=/usr/local/bin/kubelet \\ ExecStart=/usr/local/bin/kubelet \\
--config=/var/lib/kubelet/kubelet-config.yaml \\ --config /var/lib/kubelet/kubelet-config.yaml \\
--container-runtime=remote \\ --container-runtime-endpoint unix:///var/run/containerd/containerd.sock \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\ --kubeconfig /var/lib/kubelet/kubeconfig \\
--image-pull-progress-deadline=2m \\ --register-node \\
--kubeconfig=/var/lib/kubelet/kubeconfig \\ --v 2
--network-plugin=cni \\
--register-node=true \\
--v=2
Restart=on-failure Restart=on-failure
RestartSec=5 RestartSec=5
@ -243,7 +219,7 @@ EOF
### Configure the Kubernetes Proxy ### Configure the Kubernetes Proxy
``` ```
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig sudo cp kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
``` ```
Create the `kube-proxy-config.yaml` configuration file: Create the `kube-proxy-config.yaml` configuration file:
@ -262,14 +238,14 @@ EOF
Create the `kube-proxy.service` systemd unit file: Create the `kube-proxy.service` systemd unit file:
``` ```
cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service cat <<EOF | sudo tee /usr/local/lib/systemd/system/kube-proxy.service
[Unit] [Unit]
Description=Kubernetes Kube Proxy Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes Documentation=https://github.com/kubernetes/kubernetes
[Service] [Service]
ExecStart=/usr/local/bin/kube-proxy \\ ExecStart=/usr/local/bin/kube-proxy \\
--config=/var/lib/kube-proxy/kube-proxy-config.yaml --config /var/lib/kube-proxy/kube-proxy-config.yaml
Restart=on-failure Restart=on-failure
RestartSec=5 RestartSec=5
@ -281,11 +257,7 @@ EOF
### Start the Worker Services ### Start the Worker Services
``` ```
{ sudo systemctl enable --now containerd kubelet kube-proxy
sudo systemctl daemon-reload
sudo systemctl enable containerd kubelet kube-proxy
sudo systemctl start containerd kubelet kube-proxy
}
``` ```
> Remember to run the above commands on each worker node: `worker-0`, `worker-1`, and `worker-2`. > Remember to run the above commands on each worker node: `worker-0`, `worker-1`, and `worker-2`.
@ -298,16 +270,16 @@ List the registered Kubernetes nodes:
``` ```
gcloud compute ssh controller-0 \ gcloud compute ssh controller-0 \
--command "kubectl get nodes --kubeconfig admin.kubeconfig" --command 'kubectl get nodes --kubeconfig admin.kubeconfig'
``` ```
> output > output
``` ```
NAME STATUS ROLES AGE VERSION NAME STATUS ROLES AGE VERSION
worker-0 Ready <none> 22s v1.21.0 worker-0 Ready <none> 37s v1.27.4
worker-1 Ready <none> 22s v1.21.0 worker-1 Ready <none> 37s v1.27.4
worker-2 Ready <none> 22s v1.21.0 worker-2 Ready <none> 37s v1.27.4
``` ```
Next: [Configuring kubectl for Remote Access](10-configuring-kubectl.md) Next: [Configuring kubectl for Remote Access](./10-configuring-kubectl.md)

View File

@ -11,26 +11,23 @@ Each kubeconfig requires a Kubernetes API Server to connect to. To support high
Generate a kubeconfig file suitable for authenticating as the `admin` user: Generate a kubeconfig file suitable for authenticating as the `admin` user:
``` ```
{ KUBERNETES_PUBLIC_ADDRESS="$(gcloud compute addresses describe kubernetes-the-hard-way \
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ --format 'value(address)')"
--region $(gcloud config get-value compute/region) \
--format 'value(address)')
kubectl config set-cluster kubernetes-the-hard-way \ kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \ --certificate-authority ca.pem \
--embed-certs=true \ --embed-certs \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 --server "https://${KUBERNETES_PUBLIC_ADDRESS}:6443"
kubectl config set-credentials admin \ kubectl config set-credentials admin \
--client-certificate=admin.pem \ --client-certificate admin.pem \
--client-key=admin-key.pem --client-key admin-key.pem
kubectl config set-context kubernetes-the-hard-way \ kubectl config set-context kubernetes-the-hard-way \
--cluster=kubernetes-the-hard-way \ --cluster kubernetes-the-hard-way \
--user=admin --user admin
kubectl config use-context kubernetes-the-hard-way kubectl config use-context kubernetes-the-hard-way
}
``` ```
## Verification ## Verification
@ -38,14 +35,15 @@ Generate a kubeconfig file suitable for authenticating as the `admin` user:
Check the version of the remote Kubernetes cluster: Check the version of the remote Kubernetes cluster:
``` ```
kubectl version kubectl version --short
``` ```
> output > output
``` ```
Client Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.0", GitCommit:"cb303e613a121a29364f75cc67d3d580833a7479", GitTreeState:"clean", BuildDate:"2021-04-08T16:31:21Z", GoVersion:"go1.16.1", Compiler:"gc", Platform:"linux/amd64"} Client Version: v1.27.4
Server Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.0", GitCommit:"cb303e613a121a29364f75cc67d3d580833a7479", GitTreeState:"clean", BuildDate:"2021-04-08T16:25:06Z", GoVersion:"go1.16.1", Compiler:"gc", Platform:"linux/amd64"} Kustomize Version: v5.0.1
Server Version: v1.27.4
``` ```
List the nodes in the remote Kubernetes cluster: List the nodes in the remote Kubernetes cluster:
@ -58,9 +56,9 @@ kubectl get nodes
``` ```
NAME STATUS ROLES AGE VERSION NAME STATUS ROLES AGE VERSION
worker-0 Ready <none> 2m35s v1.21.0 worker-0 Ready <none> 5m38s v1.27.4
worker-1 Ready <none> 2m35s v1.21.0 worker-1 Ready <none> 5m38s v1.27.4
worker-2 Ready <none> 2m35s v1.21.0 worker-2 Ready <none> 5m38s v1.27.4
``` ```
Next: [Provisioning Pod Network Routes](11-pod-network-routes.md) Next: [Provisioning Pod Network Routes](./11-pod-network-routes.md)

View File

@ -1,10 +1,10 @@
# Provisioning Pod Network Routes # Provisioning Pod Network Routes
Pods scheduled to a node receive an IP address from the node's Pod CIDR range. At this point pods can not communicate with other pods running on different nodes due to missing network [routes](https://cloud.google.com/compute/docs/vpc/routes). Pods scheduled to a node receive an IP address from the node's Pod CIDR range. At this point pods can not communicate with other pods running on different nodes due to missing network [routes](https://cloud.google.com/vpc/docs/routes).
In this lab you will create a route for each worker node that maps the node's Pod CIDR range to the node's internal IP address. In this lab you will create a route for each worker node that maps the node's Pod CIDR range to the node's internal IP address.
> There are [other ways](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-achieve-this) to implement the Kubernetes networking model. > There are [other ways](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model) to implement the Kubernetes networking model.
## The Routing Table ## The Routing Table
@ -14,7 +14,7 @@ Print the internal IP address and Pod CIDR range for each worker instance:
``` ```
for instance in worker-0 worker-1 worker-2; do for instance in worker-0 worker-1 worker-2; do
gcloud compute instances describe ${instance} \ gcloud compute instances describe "${instance}" \
--format 'value[separator=" "](networkInterfaces[0].networkIP,metadata.items[0].value)' --format 'value[separator=" "](networkInterfaces[0].networkIP,metadata.items[0].value)'
done done
``` ```
@ -33,17 +33,17 @@ Create network routes for each worker instance:
``` ```
for i in 0 1 2; do for i in 0 1 2; do
gcloud compute routes create kubernetes-route-10-200-${i}-0-24 \ gcloud compute routes create "kubernetes-route-10-200-${i}-0-24" \
--destination-range "10.200.${i}.0/24" \
--network kubernetes-the-hard-way \ --network kubernetes-the-hard-way \
--next-hop-address 10.240.0.2${i} \ --next-hop-address "10.240.0.2${i}"
--destination-range 10.200.${i}.0/24
done done
``` ```
List the routes in the `kubernetes-the-hard-way` VPC network: List the routes in the `kubernetes-the-hard-way` VPC network:
``` ```
gcloud compute routes list --filter "network: kubernetes-the-hard-way" gcloud compute routes list --filter 'network: kubernetes-the-hard-way'
``` ```
> output > output
@ -57,4 +57,4 @@ kubernetes-route-10-200-1-0-24 kubernetes-the-hard-way 10.200.1.0/24 10.240.0
kubernetes-route-10-200-2-0-24 kubernetes-the-hard-way 10.200.2.0/24 10.240.0.22 1000 kubernetes-route-10-200-2-0-24 kubernetes-the-hard-way 10.200.2.0/24 10.240.0.22 1000
``` ```
Next: [Deploying the DNS Cluster Add-on](12-dns-addon.md) Next: [Deploying the DNS Cluster Add-on](./12-dns-addon.md)

View File

@ -1,13 +1,13 @@
# Deploying the DNS Cluster Add-on # Deploying the DNS Cluster Add-on
In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) which provides DNS based service discovery, backed by [CoreDNS](https://coredns.io/), to applications running inside the Kubernetes cluster. In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) which provides DNS based service discovery, backed by [CoreDNS](https://github.com/coredns/coredns), to applications running inside the Kubernetes cluster.
## The DNS Cluster Add-on ## The DNS Cluster Add-on
Deploy the `coredns` cluster add-on: Deploy the `coredns` cluster add-on:
``` ```
kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns-1.8.yaml kubectl apply --filename ./manifests/coredns-1.10.1.yaml
``` ```
> output > output
@ -17,14 +17,14 @@ serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created configmap/coredns created
deployment.apps/coredns created
service/kube-dns created service/kube-dns created
deployment.apps/coredns created
``` ```
List the pods created by the `kube-dns` deployment: List the pods created by the `kube-dns` deployment:
``` ```
kubectl get pods -l k8s-app=kube-dns -n kube-system kubectl get pods --namespace kube-system --selector k8s-app=kube-dns
``` ```
> output > output
@ -37,16 +37,16 @@ coredns-8494f9c688-zqrj2 1/1 Running 0 10s
## Verification ## Verification
Create a `busybox` deployment: Create a `busybox` pod:
``` ```
kubectl run busybox --image=busybox:1.28 --command -- sleep 3600 kubectl run busybox --image busybox:1.36.1 --command -- sleep infinity
``` ```
List the pod created by the `busybox` deployment: List the pod created:
``` ```
kubectl get pods -l run=busybox kubectl get pods --selector run=busybox
``` ```
> output > output
@ -59,13 +59,14 @@ busybox 1/1 Running 0 3s
Retrieve the full name of the `busybox` pod: Retrieve the full name of the `busybox` pod:
``` ```
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}") POD_NAME=$(kubectl get pods --selector run=busybox \
--output jsonpath="{.items[0].metadata.name}")
``` ```
Execute a DNS lookup for the `kubernetes` service inside the `busybox` pod: Execute a DNS lookup for the `kubernetes` service inside the `busybox` pod:
``` ```
kubectl exec -ti $POD_NAME -- nslookup kubernetes kubectl exec --stdin --tty "${POD_NAME}" -- nslookup kubernetes
``` ```
> output > output
@ -78,4 +79,10 @@ Name: kubernetes
Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local
``` ```
Next: [Smoke Test](13-smoke-test.md) Delete the `busybox` pod:
```
kubectl delete pod "${POD_NAME}"
```
Next: [Smoke Test](./13-smoke-test.md)

View File

@ -10,19 +10,20 @@ Create a generic secret:
``` ```
kubectl create secret generic kubernetes-the-hard-way \ kubectl create secret generic kubernetes-the-hard-way \
--from-literal="mykey=mydata" --from-literal 'mykey=mydata'
``` ```
Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd: Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd:
``` ```
gcloud compute ssh controller-0 \ gcloud compute ssh controller-0 \
--command "sudo ETCDCTL_API=3 etcdctl get \ --command 'sudo ETCDCTL_API=3 etcdctl get \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \ --cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \ --cert=/etc/etcd/kubernetes.pem \
--endpoints=https://127.0.0.1:2379 \
    --key=/etc/etcd/kubernetes-key.pem \ --key=/etc/etcd/kubernetes-key.pem \
/registry/secrets/default/kubernetes-the-hard-way | hexdump -C" /registry/secrets/default/kubernetes-the-hard-way \
| hexdump -C'
``` ```
> output > output
@ -59,16 +60,16 @@ The etcd key should be prefixed with `k8s:enc:aescbc:v1:key1`, which indicates t
In this section you will verify the ability to create and manage [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). In this section you will verify the ability to create and manage [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/).
Create a deployment for the [nginx](https://nginx.org/en/) web server: Create a deployment for the [nginx](https://nginx.org/) web server:
``` ```
kubectl create deployment nginx --image=nginx kubectl create deployment nginx --image nginx
``` ```
List the pod created by the `nginx` deployment: List the pod created by the `nginx` deployment:
``` ```
kubectl get pods -l app=nginx kubectl get pods --selector app=nginx
``` ```
> output > output
@ -85,13 +86,13 @@ In this section you will verify the ability to access applications remotely usin
Retrieve the full name of the `nginx` pod: Retrieve the full name of the `nginx` pod:
``` ```
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}") POD_NAME="$(kubectl get pods --selector app=nginx --output jsonpath='{.items[0].metadata.name}')"
``` ```
Forward port `8080` on your local machine to port `80` of the `nginx` pod: Forward port `8080` on your local machine to port `80` of the `nginx` pod:
``` ```
kubectl port-forward $POD_NAME 8080:80 kubectl port-forward "${POD_NAME}" 8080:80
``` ```
> output > output
@ -111,13 +112,13 @@ curl --head http://127.0.0.1:8080
``` ```
HTTP/1.1 200 OK HTTP/1.1 200 OK
Server: nginx/1.19.10 Server: nginx/1.25.1
Date: Sun, 02 May 2021 05:29:25 GMT Date: Mon, 31 Jul 2023 11:17:53 GMT
Content-Type: text/html Content-Type: text/html
Content-Length: 612 Content-Length: 615
Last-Modified: Tue, 13 Apr 2021 15:13:59 GMT Last-Modified: Tue, 13 Jun 2023 15:08:10 GMT
Connection: keep-alive Connection: keep-alive
ETag: "6075b537-264" ETag: "6488865a-267"
Accept-Ranges: bytes Accept-Ranges: bytes
``` ```
@ -137,30 +138,30 @@ In this section you will verify the ability to [retrieve container logs](https:/
Print the `nginx` pod logs: Print the `nginx` pod logs:
``` ```
kubectl logs $POD_NAME kubectl logs "${POD_NAME}"
``` ```
> output > output
``` ```
... ...
127.0.0.1 - - [02/May/2021:05:29:25 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.64.0" "-" 127.0.0.1 - - [31/Jul/2023:11:15:02 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/8.0.1" "-"
``` ```
### Exec ### Exec
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container). In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container).
Print the nginx version by executing the `nginx -v` command in the `nginx` container: Print the nginx version by executing the `nginx -v` command in the `nginx` container:
``` ```
kubectl exec -ti $POD_NAME -- nginx -v kubectl exec --stdin --tty "${POD_NAME}" -- nginx -v
``` ```
> output > output
``` ```
nginx version: nginx/1.19.10 nginx version: nginx/1.25.1
``` ```
## Services ## Services
@ -173,48 +174,48 @@ Expose the `nginx` deployment using a [NodePort](https://kubernetes.io/docs/conc
kubectl expose deployment nginx --port 80 --type NodePort kubectl expose deployment nginx --port 80 --type NodePort
``` ```
> The LoadBalancer service type can not be used because your cluster is not configured with [cloud provider integration](https://kubernetes.io/docs/getting-started-guides/scratch/#cloud-provider). Setting up cloud provider integration is out of scope for this tutorial. > The LoadBalancer service type can not be used because your cluster is not configured with [cloud provider integration](https://kubernetes.io/docs/concepts/architecture/cloud-controller/). Setting up cloud provider integration is out of scope for this tutorial.
Retrieve the node port assigned to the `nginx` service: Retrieve the node port assigned to the `nginx` service:
``` ```
NODE_PORT=$(kubectl get svc nginx \ NODE_PORT="$(kubectl get svc nginx \
--output=jsonpath='{range .spec.ports[0]}{.nodePort}') --output jsonpath='{range .spec.ports[0]}{.nodePort}')"
``` ```
Create a firewall rule that allows remote access to the `nginx` node port: Create a firewall rule that allows remote access to the `nginx` node port:
``` ```
gcloud compute firewall-rules create kubernetes-the-hard-way-allow-nginx-service \ gcloud compute firewall-rules create kubernetes-the-hard-way-allow-nginx-service \
--allow=tcp:${NODE_PORT} \ --allow "tcp:${NODE_PORT}" \
--network kubernetes-the-hard-way --network kubernetes-the-hard-way
``` ```
Retrieve the external IP address of a worker instance: Retrieve the external IP address of a worker instance:
``` ```
EXTERNAL_IP=$(gcloud compute instances describe worker-0 \ EXTERNAL_IP="$(gcloud compute instances describe worker-0 \
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)') --format 'value(networkInterfaces[0].accessConfigs[0].natIP)')"
``` ```
Make an HTTP request using the external IP address and the `nginx` node port: Make an HTTP request using the external IP address and the `nginx` node port:
``` ```
curl -I http://${EXTERNAL_IP}:${NODE_PORT} curl --head "http://${EXTERNAL_IP}:${NODE_PORT}"
``` ```
> output > output
``` ```
HTTP/1.1 200 OK HTTP/1.1 200 OK
Server: nginx/1.19.10 Server: nginx/1.25.1
Date: Sun, 02 May 2021 05:31:52 GMT Date: Mon, 31 Jul 2023 11:24:03 GMT
Content-Type: text/html Content-Type: text/html
Content-Length: 612 Content-Length: 615
Last-Modified: Tue, 13 Apr 2021 15:13:59 GMT Last-Modified: Tue, 13 Jun 2023 15:08:10 GMT
Connection: keep-alive Connection: keep-alive
ETag: "6075b537-264" ETag: "6488865a-267"
Accept-Ranges: bytes Accept-Ranges: bytes
``` ```
Next: [Cleaning Up](14-cleanup.md) Next: [Cleaning Up](./14-cleanup.md)

View File

@ -1,16 +1,16 @@
# Cleaning Up # Cleaning Up
In this lab you will delete the compute resources created during this tutorial. In this lab you will delete the compute resources and optionally the files and configurations created during this tutorial.
## Compute Instances ## Compute Instances
Delete the controller and worker compute instances: Delete the controller and worker compute instances:
``` ```
gcloud -q compute instances delete \ gcloud compute instances delete \
controller-0 controller-1 controller-2 \ controller-0 controller-1 controller-2 \
worker-0 worker-1 worker-2 \ worker-0 worker-1 worker-2 \
--zone $(gcloud config get-value compute/zone) --quiet
``` ```
## Networking ## Networking
@ -18,46 +18,56 @@ gcloud -q compute instances delete \
Delete the external load balancer network resources: Delete the external load balancer network resources:
``` ```
{ gcloud compute forwarding-rules delete kubernetes-forwarding-rule --quiet
gcloud -q compute forwarding-rules delete kubernetes-forwarding-rule \
--region $(gcloud config get-value compute/region)
gcloud -q compute target-pools delete kubernetes-target-pool gcloud compute target-pools delete kubernetes-target-pool --quiet
gcloud -q compute http-health-checks delete kubernetes gcloud compute http-health-checks delete kubernetes --quiet
gcloud -q compute addresses delete kubernetes-the-hard-way gcloud compute addresses delete kubernetes-the-hard-way --quiet
}
``` ```
Delete the `kubernetes-the-hard-way` firewall rules: Delete the `kubernetes-the-hard-way` firewall rules:
``` ```
gcloud -q compute firewall-rules delete \ gcloud compute firewall-rules delete \
kubernetes-the-hard-way-allow-nginx-service \
kubernetes-the-hard-way-allow-internal \
kubernetes-the-hard-way-allow-external \ kubernetes-the-hard-way-allow-external \
kubernetes-the-hard-way-allow-health-check kubernetes-the-hard-way-allow-health-check \
kubernetes-the-hard-way-allow-internal \
kubernetes-the-hard-way-allow-nginx-service \
--quiet
``` ```
Delete the `kubernetes-the-hard-way` network VPC: Delete the `kubernetes-the-hard-way` network VPC:
``` ```
{ gcloud compute routes delete \
gcloud -q compute routes delete \
kubernetes-route-10-200-0-0-24 \ kubernetes-route-10-200-0-0-24 \
kubernetes-route-10-200-1-0-24 \ kubernetes-route-10-200-1-0-24 \
kubernetes-route-10-200-2-0-24 kubernetes-route-10-200-2-0-24 \
--quiet
gcloud -q compute networks subnets delete kubernetes gcloud compute networks subnets delete kubernetes --quiet
gcloud -q compute networks delete kubernetes-the-hard-way gcloud compute networks delete kubernetes-the-hard-way --quiet
}
``` ```
Delete the `kubernetes-the-hard-way` compute address: ## Cleanup The Admin Kubernetes Configuration File
``` ```
gcloud -q compute addresses delete kubernetes-the-hard-way \ kubectl config unset current-context
--region $(gcloud config get-value compute/region)
kubectl config delete-context kubernetes-the-hard-way
kubectl config delete-user admin
kubectl config delete-cluster kubernetes-the-hard-way
```
## Cleanup the Client Tools
```
sudo rm -i /usr/local/bin/cfssl \
/usr/local/bin/cfssljson \
/usr/local/bin/kubectl
``` ```

View File

@ -1,3 +1,4 @@
---
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
@ -22,11 +23,12 @@ rules:
- list - list
- watch - watch
- apiGroups: - apiGroups:
- "" - discovery.k8s.io
resources: resources:
- nodes - endpointslices
verbs: verbs:
- get - list
- watch
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
@ -51,30 +53,61 @@ metadata:
name: coredns name: coredns
namespace: kube-system namespace: kube-system
data: data:
Corefile: | Corefile: |-
.:53 { .:53 {
errors errors
health health {
lameduck 5s
}
ready ready
kubernetes cluster.local in-addr.arpa ip6.arpa { kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure pods insecure
fallthrough in-addr.arpa ip6.arpa fallthrough in-addr.arpa ip6.arpa
ttl 30
} }
prometheus :9153 prometheus 0.0.0.0:9153
forward . /etc/resolv.conf
cache 30 cache 30
loop loop
reload reload
loadbalance loadbalance
} }
--- ---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
name: kube-dns
namespace: kube-system
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.32.0.10
ports:
- name: dns-udp
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: coredns
namespace: kube-system
labels: labels:
k8s-app: kube-dns k8s-app: kube-dns
kubernetes.io/name: "CoreDNS" kubernetes.io/name: "CoreDNS"
name: coredns
namespace: kube-system
spec: spec:
replicas: 2 replicas: 2
strategy: strategy:
@ -89,46 +122,10 @@ spec:
labels: labels:
k8s-app: kube-dns k8s-app: kube-dns
spec: spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
beta.kubernetes.io/os: linux
containers: containers:
- name: coredns - args: [ "-conf", "/etc/coredns/Corefile" ]
image: coredns/coredns:1.7.0 image: coredns/coredns:1.10.1
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe: livenessProbe:
httpGet: httpGet:
path: /health path: /health
@ -138,12 +135,49 @@ spec:
timeoutSeconds: 5 timeoutSeconds: 5
successThreshold: 1 successThreshold: 1
failureThreshold: 5 failureThreshold: 5
name: coredns
ports:
- containerPort: 53
name: dns-udp
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe: readinessProbe:
httpGet: httpGet:
path: /ready path: /ready
port: 8181 port: 8181
scheme: HTTP scheme: HTTP
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
          - ALL
readOnlyRootFilesystem: true
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
dnsPolicy: Default dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes: volumes:
- name: config-volume - name: config-volume
configMap: configMap:
@ -151,30 +185,3 @@ spec:
items: items:
- key: Corefile - key: Corefile
path: Corefile path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.32.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP