2023-11-14 17:31:09

 

2023-11-17 revision:

To deploy an OpenShift Container Platform cluster, the clocks on the cluster nodes must be synchronized with an NTP server.

If they are not synchronized and the time difference between nodes is greater than 2 seconds, clock drift can cause the deployment to fail.

The NTP synchronization step has to be done after creating the manifests and before generating the Ignition files.

 

OpenShift architecture

 

OpenShift UPI installation infrastructure diagram

 

Resource list

1. DNS configuration: Project_DNS - 172.16.10.103


Complete the basic Windows setup, then set the IP address and computer name
Manage -> Add Roles and Features -> install the DNS Server role
Tools -> DNS -> right-click Forward Lookup Zones -> New Zone -> create team4.local
Right-click Reverse Lookup Zones -> New Zone -> enter 172.16.10. and create the zone
Add the host (A) records (see the verification sketch below)

DNS configuration screenshots 1-4
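
The host records themselves only appear in the screenshots, so here is a minimal verification sketch run from any Linux host that uses this DNS server. It assumes the standard UPI records: api, api-int, and a *.apps wildcard pointing at the load balancer (172.16.10.105), plus one A record (and PTR) per node; adjust the names if your records differ.

# nslookup api.team4.team4.local 172.16.10.103
# nslookup api-int.team4.team4.local 172.16.10.103
# nslookup test.apps.team4.team4.local 172.16.10.103  -- any name under *.apps should resolve
# nslookup bootstrap.team4.team4.local 172.16.10.103
# nslookup 172.16.10.107 172.16.10.103                -- reverse lookup should return master1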

2. HTTP, load balancer, and OpenShift configuration: Project_OpenShift - 172.16.10.105

2.1. Network configuration

# vi /etc/sysconfig/network-scripts/ifcfg-ens192

- Network configuration
TYPE=Ethernet
BOOTPROTO=static
NAME=ens192
DEVICE=ens192
ONBOOT=yes
IPADDR=172.16.10.105
PREFIX=24
GATEWAY=172.16.10.1
DNS1=172.16.10.103
DNS2=168.126.63.1

# systemctl restart NetworkManager.service

/etc/sysconfig/network-scripts/ifcfg-ens192
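
If the new address does not take effect after restarting NetworkManager, reloading and reactivating the connection usually helps; a quick sketch, assuming the connection is named ens192:

# nmcli connection reload
# nmcli connection up ens192
# ip addr show ens192  -- confirm 172.16.10.105/24 is assigned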

2.2. Install Apache

# yum install -y httpd

- Listen 80 -> Listen 8080
# sed -i 's/Listen 80/Listen 8080/g' /etc/httpd/conf/httpd.conf

# systemctl enable --now httpd

/etc/httpd/conf/httpd.conf
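
A quick check that Apache is actually listening on the new port (a sketch; a 403 on an empty document root is still fine, it only needs to answer):

# ss -tlnp | grep 8080
# curl -I http://172.16.10.105:8080/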

2.3. Install HAProxy

# yum install -y haproxy
# vi /etc/haproxy/haproxy.cfg

- Edit haproxy.cfg as follows
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   https://www.haproxy.org/download/1.8/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

    # utilize system-wide crypto-policies
    ssl-default-bind-ciphers PROFILE=SYSTEM
    ssl-default-server-ciphers PROFILE=SYSTEM

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
#    mode                    http
    mode		    tcp	
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
frontend kubernetes_api
    bind 0.0.0.0:6443
    default_backend kubernetes_api
    option tcplog

backend kubernetes_api
    balance roundrobin
    server bootstrap bootstrap.team4.team4.local:6443 check
    server master1 master1.team4.team4.local:6443 check
    server master2 master2.team4.team4.local:6443 check
    server master3 master3.team4.team4.local:6443 check
    server worker1 worker1.team4.team4.local:6443 check
    server worker2 worker2.team4.team4.local:6443 check


frontend machine_config
    bind 0.0.0.0:22623
    default_backend machine_config
    option tcplog

backend machine_config
    balance roundrobin
    server bootstrap bootstrap.team4.team4.local:22623 check
    server master1 master1.team4.team4.local:22623 check
    server master2 master2.team4.team4.local:22623 check
    server master3 master3.team4.team4.local:22623 check
    server worker1 worker1.team4.team4.local:22623 check
    server worker2 worker2.team4.team4.local:22623 check

frontend router_https
    bind 0.0.0.0:443
    default_backend router_https

backend router_https
    balance roundrobin
    server master1 master1.team4.team4.local:443 check
    server master2 master2.team4.team4.local:443 check
    server master3 master3.team4.team4.local:443 check
    server worker1 worker1.team4.team4.local:443 check
    server worker2 worker2.team4.team4.local:443 check

frontend router_http
    mode tcp
    bind 0.0.0.0:80
    default_backend router_http

backend router_http
    mode tcp
    balance roundrobin
    server master1 master1.team4.team4.local:80 check
    server master2 master2.team4.team4.local:80 check
    server master3 master3.team4.team4.local:80 check
    server worker1 worker1.team4.team4.local:80 check
    server worker2 worker2.team4.team4.local:80 check
    
# systemctl enable --now haproxy

 

haproxy.cfg screenshots 1-4
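
Before relying on the load balancer, it is worth validating the configuration and confirming the listeners (a sketch):

# haproxy -c -f /etc/haproxy/haproxy.cfg  -- should report that the configuration file is valid
# ss -tlnp | grep -E ':(80|443|6443|22623)'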

2.4. Firewall

# firewall-cmd --permanent --add-port=80/tcp
# firewall-cmd --permanent --add-port=443/tcp
# firewall-cmd --permanent --add-port=6443/tcp
# firewall-cmd --permanent --add-port=22623/tcp
# firewall-cmd --permanent --add-port=53/{tcp,udp}
# firewall-cmd --permanent --add-port=8080/tcp
# firewall-cmd --reload

Firewall rule list
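
The list in the screenshot can be reproduced with (a sketch):

# firewall-cmd --list-ports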

2.5. SELinux: if SELinux is enforcing, choose one of the two options below

1. 
# setsebool -P haproxy_connect_any=1


2.
# semanage port -l | grep http_port_t  -- check the HTTP ports that SELinux allows
# semanage port -a -t http_port_t -p tcp 22623
# semanage port -a -t http_port_t -p tcp 6443
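
Either way, the result can be confirmed before moving on (a sketch):

# getsebool haproxy_connect_any         -- option 1: should report "on"
# semanage port -l | grep http_port_t   -- option 2: 6443 and 22623 should now be listed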

 

2.6. Generate an SSH key

# ssh-keygen -q -N ''
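
The public half of the key is what install-config.yaml needs later; a sketch assuming the default key path:

# cat /root/.ssh/id_rsa.pub  -- paste this value into sshKey in install-config.yaml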

 

2.7. Install openshift-install

# wget https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/stable/openshift-install-linux.tar.gz
# wget https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/stable/openshift-client-linux.tar.gz
- https://console.redhat.com/openshift/install/pull-secret  -- download the pull secret and copy it over with scp

# tar xvfz openshift-install-linux.tar.gz -C /usr/local/bin/
# tar xvfz openshift-client-linux.tar.gz -C /usr/local/bin/
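
A quick sanity check that the binaries landed on the PATH (a sketch):

# openshift-install version
# oc version --client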

# mkdir -p /root/ocp/config
# vi ocp/config/install-config.yaml

-- Edit install-config.yaml as follows
apiVersion: v1
baseDomain: team4.local
compute:
- hyperthreading: Enabled
  name: worker
  replicas: 2
controlPlane:
  hyperthreading: Enabled
  name: master
  replicas: 3     
metadata:
  name: team4
networking:
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  networkType: OpenShiftSDN
  serviceNetwork:
  - 172.30.0.0/16
platform:
  none: {}
fips: false
pullSecret: 'paste the pull secret here'
sshKey: 'paste the public SSH key here'
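
Note that openshift-install create manifests consumes install-config.yaml from the asset directory, so keeping a backup copy outside of it is a common precaution (a sketch):

# cp /root/ocp/config/install-config.yaml /root/install-config.yaml.bak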

# cd /usr/local/bin
# openshift-install create manifests --dir=/root/ocp/config/

install-config.yaml

2.7.1. NTP server synchronization

-- Time synchronization between the nodes is configured after creating the manifests and before generating the Ignition files
# vi /root/chrony.conf

-- Edit chrony.conf as follows
server time.bora.net iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
noclientlog
logchange 0.5
logdir /var/log/chrony

# base64 -w0 /root/chrony.conf  -- get the base64-encoded value (-w0 keeps it on one line for the data URL)

# vi /root/99_{masters,workers}-chrony-configuration.yaml

-- Edit the chrony-configuration.yaml files; create one for master and one for worker
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  creationTimestamp: null
  labels:
    machineconfiguration.openshift.io/role: {master,worker}
  name: 99-{master,worker}-etc-chrony-conf
spec:
  config:
    ignition:
      config: {}
      security:
        tls: {}
      timeouts: {}
      version: 3.1.0
    networkd: {}
    passwd: {}
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,[paste the base64-encoded value here]
        group:
          name: root
        mode: 420
        overwrite: true
        path: /etc/chrony.conf
        user:
          name: root
  osImageURL: ""


# cp /root/99_{masters,workers}-chrony-configuration.yaml /root/ocp/config/openshift/

# openshift-install create ignition-configs --dir /root/ocp/config/

# mkdir /var/www/html/ign
# cp /root/ocp/config/*.ign /var/www/html/ign/
# chmod 777 /var/www/html/ign/*.ign

# systemctl restart httpd
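
Before booting any nodes, it is worth confirming the Ignition files are actually reachable over HTTP (a sketch):

# curl -sI http://172.16.10.105:8080/ign/bootstrap.ign  -- expect HTTP/1.1 200 OK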

 

chrony.conf
chrony.conf base64-encoded value
chrony-configuration.yaml
manifests creation
copied into the openshift directory and verified

3. OpenShift deployment

3.1. Deploy the bootstrap ignition: Project_bootstrap - 172.16.10.106

- Install RHCOS 4.13

# nmtui

-- Network settings
ipv4 address 172.16.10.106/24
gateway 172.16.10.1
dns 172.16.10.103

-- If the IP and DNS settings are correct, the hostname changes automatically
# hostname

# sudo -i
# coreos-installer install --ignition-url=http://172.16.10.105:8080/ign/bootstrap.ign /dev/sda --insecure-ignition --copy-network

# reboot
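
While the bootstrap node comes back up, its progress can be followed from Project_OpenShift (a sketch; assumes the SSH key injected through the Ignition config):

# ssh -i /root/.ssh/id_rsa core@172.16.10.106 "journalctl -b -f -u bootkube.service"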

 

3.2. Deploy the master ignition: Project_master1,2,3 - 172.16.10.{107,108,109}

- Install RHCOS 4.13

# nmtui

-- Network settings
ipv4 address 172.16.10.{107,108,109}/24
gateway 172.16.10.1
dns 172.16.10.103

# hostname

# sudo coreos-installer install --ignition-url=http://172.16.10.105:8080/ign/master.ign /dev/sda --insecure-ignition --copy-network

# reboot

 

3.3. bootstrap-complete: run on Project_Openshift

# export KUBECONFIG=/root/ocp/config/auth/kubeconfig
# oc get nodes  -- wait until master1, 2, and 3 show Ready, then continue

# chmod 600 /root/.ssh/id_rsa
# ssh -i /root/.ssh/id_rsa core@172.16.10.{107,108,109}

# timedatectl set-timezone Asia/Seoul
# systemctl status chronyd  -- check that NTP synchronization is working
# exit

-- Run bootstrap-complete
# ./openshift-install wait-for bootstrap-complete --log-level=info --dir=/root/ocp/config/
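
Once bootstrap-complete reports success, the bootstrap node is no longer needed; a common follow-up is to take it out of the load balancer (a sketch):

# vi /etc/haproxy/haproxy.cfg  -- comment out the two "server bootstrap ..." lines
# systemctl reload haproxy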

 

3.4. Deploy the worker ignition: Project_worker1,2 - 172.16.10.{110,111}

- Install RHCOS

# nmtui

-- Network settings
ipv4 address 172.16.10.{110,111}/24
gateway 172.16.10.1
dns 172.16.10.103

# hostname

# sudo coreos-installer install --ignition-url=http://172.16.10.105:8080/ign/worker.ign /dev/sda --insecure-ignition --copy-network

# reboot

 

3.5. Approve worker node CSRs and check NTP synchronization: run on Project_Openshift

-- Check the CSRs; any in Pending state must be approved
# oc get csr

# cd /usr/local/bin/
# vi csr.sh

-- csr.sh contents
#!/bin/bash

# Loop through CSR names starting with "csr-"
for csr_name in $(oc get csr | grep -o '^csr-[a-zA-Z0-9]*'); do
    # Approve the CSR
    oc adm certificate approve "$csr_name"
done

-- Run the script
# sh csr.sh
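
Worker CSRs arrive in two rounds (client certificates first, then serving certificates), so the check may need to be repeated after a minute or two. An equivalent one-liner that only touches Pending requests (a sketch):

# oc get csr | awk '/Pending/ {print $1}' | xargs -r oc adm certificate approve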

-- Check the CSR and worker status again
# oc get csr

# ssh -i /root/.ssh/id_rsa core@172.16.10.{110,111}

# timedatectl set-timezone Asia/Seoul
# systemctl status chronyd

# exit

 

csr.sh

3.6. Run install-complete: run on Project_Openshift

# ./openshift-install wait-for install-complete --dir=/root/ocp/config/ --log-level=debug

Installation-complete output: check the console URL, user ID, and password
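
A couple of follow-up checks that the cluster actually finished rolling out (a sketch):

# oc get clusterversion
# oc get co     -- all cluster operators should report AVAILABLE=True
# oc get nodes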

4. Install Chrome on CentOS: run on Project_Openshift

# wget https://dl.google.com/linux/direct/google-chrome-stable_current_x86_64.rpm
# yum localinstall google-chrome-stable_current_x86_64.rpm
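
If the console URL or credentials scroll off the screen, they can be recovered later (a sketch; the kubeadmin password file is written by the installer):

# oc whoami --show-console
# cat /root/ocp/config/auth/kubeadmin-password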

 

When opening the console, go in over HTTP since HTTPS is not used here.
Enter the user ID and password shown when the installation completed.
Success!

 
