Using Kind
Creating the kind cluster (configuration)
- create the ingress and the local registry
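The creation script below expects a handful of shell variables; a minimal sketch with illustrative values (the names follow the script, the values are assumptions):
# Hypothetical defaults for the variables used by the scripts below - adjust to your setup
CLUSTER_NAME="kind"            # kind cluster name
ingress_http_port="80"         # host port mapped to the ingress HTTP port
ingress_https_port="443"       # host port mapped to the ingress HTTPS port
reg_name="kind-registry"       # local registry container name
reg_port="5000"                # local registry port
reg_interface="127.0.0.1"      # host interface the registry binds to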
cat <<EOF | kind create cluster --name $CLUSTER_NAME --config=-
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        # only allow the ingress controller to run on a specific node(s) matching the label selector
        node-labels: "ingress-ready=true"
  # allow the local host to make requests to the Ingress controller over ports 80/443
  extraPortMappings:
  - containerPort: 80
    hostPort: ${ingress_http_port}
    protocol: TCP
  - containerPort: 443
    hostPort: ${ingress_https_port}
    protocol: TCP
#networking:
#  kubeProxyMode: "ipvs"
# create a cluster with the local registry enabled in containerd
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
    endpoint = ["http://${reg_name}:${reg_port}"]
EOF
running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)"
if [ "${running}" != 'true' ]; then
  docker run \
    --detach \
    --restart always \
    --name "${reg_name}" \
    --publish "${reg_interface}:${reg_port}:5000" \
    registry:2
fi
docker network connect "kind" "${reg_name}" || true
cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "localhost:${reg_port}"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
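To sanity-check the registry wiring you can push a test image through the host port; this check is a sketch and not part of the original script:
# Push a scratch image to confirm the registry is reachable from the host
docker pull busybox:latest
docker tag busybox:latest localhost:${reg_port}/busybox:test
docker push localhost:${reg_port}/busybox:test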
Deleting the kind cluster and the local registry
kind delete cluster --name $CLUSTER_NAME
docker kill kind-registry || true
docker rm kind-registry || true
Creating and deleting namespaces
kubectl create namespace $ns || true
kubectl delete namespace $ns || true
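A sketch of how the three org namespaces might be defined and created in one pass (the namespace names are assumptions):
# Hypothetical namespace names for the three orgs
ORG0_NS="org0"
ORG1_NS="org1"
ORG2_NS="org2"
for ns in $ORG0_NS $ORG1_NS $ORG2_NS; do
  kubectl create namespace $ns || true
done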
Initializing storage volumes
export STORAGE_CLASS="standard"
# apply pvc-fabric-org0~2.yaml for every org
cat kube/pvc-fabric-org0.yaml | envsubst | kubectl -n $ORG0_NS create -f - || true
# pvc-fabric-org0.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: fabric-org0
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ${STORAGE_CLASS}
  resources:
    requests:
      storage: 1Gi
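Assuming the org1/org2 claims follow the same naming pattern, a quick check that the claims exist (with kind's default local-path provisioner they may stay Pending until the first pod mounts them, which is expected):
# Confirm the claims were created in each org namespace
kubectl -n $ORG0_NS get pvc fabric-org0
kubectl -n $ORG1_NS get pvc fabric-org1
kubectl -n $ORG2_NS get pvc fabric-org2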
load organization's config
# apply org0~2-config
kubectl -n $ORG0_NS delete configmap org0-config || true
# apply config/org0~2
# org0 - orderer.yaml, fabric-ca-server-config.yaml, configtx-template.yaml
# org1~2 - core.yaml, fabric-ca-server-config.yaml
kubectl -n $ORG0_NS create configmap org0-config --from-file=config/org0
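The delete/create pair can be repeated for org1 and org2; a sketch assuming the config/org0~2 layout noted in the comments above (bash indirect expansion resolves each org's namespace variable):
# Recreate each org's config map from its config directory
for org in org0 org1 org2; do
  ns_var="$(echo ${org} | tr a-z A-Z)_NS"   # ORG0_NS, ORG1_NS, ORG2_NS
  ns="${!ns_var}"                           # indirect expansion (bash)
  kubectl -n $ns delete configmap ${org}-config || true
  kubectl -n $ns create configmap ${org}-config --from-file=config/${org}
done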
Initializing the TLS CA issuers
# apply to ORG0~2_NS
kubectl -n $ORG0_NS apply -f kube/root-tls-cert-issuer.yaml
kubectl -n $ORG0_NS wait --timeout=30s --for=condition=Ready issuer/root-tls-cert-issuer
# root-tls-cert-issuer.yaml
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: root-tls-cert-issuer
spec:
  selfSigned: {}
# Use the self-signing issuer to generate three Issuers, one for each org.
# apply to ORG0~2_NS
kubectl -n $ORG0_NS apply -f kube/org0/org0-tls-cert-issuer.yaml
kubectl -n $ORG0_NS wait --timeout=30s --for=condition=Ready issuer/org0-tls-cert-issuer
# org0-tls-cert-issuer.yaml
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: org0-tls-cert-issuer
spec:
  isCA: true
  privateKey:
    algorithm: ECDSA
    size: 256
  commonName: org0.example.com
  secretName: org0-tls-cert-issuer-secret
  issuerRef:
    name: root-tls-cert-issuer
    kind: Issuer
    group: cert-manager.io
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: org0-tls-cert-issuer
spec:
  ca:
    secretName: org0-tls-cert-issuer-secret
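After the apply/wait commands above succeed, cert-manager should have created the org CA secret; a quick check (sketch):
# Confirm the Certificate, Issuer, and backing secret for the org CA exist
kubectl -n $ORG0_NS get certificate org0-tls-cert-issuer
kubectl -n $ORG0_NS get issuer org0-tls-cert-issuer
kubectl -n $ORG0_NS get secret org0-tls-cert-issuer-secret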
launching Fabric CAs
# apply kube/org0~2/org0~2-ca.yaml to $ORG0~2_NS
apply_template kube/org0/org0-ca.yaml $ORG0_NS
# org0-ca.yaml
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: org0-ca-tls-cert
spec:
  isCA: false
  privateKey:
    algorithm: ECDSA
    size: 256
  dnsNames:
    - localhost
    - org0-ca
    - org0-ca.${ORG0_NS}.svc.cluster.local
    - org0-ca.${DOMAIN}
  ipAddresses:
    - 127.0.0.1
  secretName: org0-ca-tls-cert
  issuerRef:
    name: org0-tls-cert-issuer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: org0-ca
spec:
  replicas: 1
  selector:
    matchLabels:
      app: org0-ca
  template:
    metadata:
      labels:
        app: org0-ca
    spec:
      containers:
        - name: main
          image: ${FABRIC_CONTAINER_REGISTRY}/fabric-ca:${FABRIC_CA_VERSION}
          imagePullPolicy: IfNotPresent
          env:
            - name: FABRIC_CA_SERVER_CA_NAME
              value: "org0-ca"
            - name: FABRIC_CA_SERVER_DEBUG
              value: "false"
            - name: FABRIC_CA_SERVER_HOME
              value: "/var/hyperledger/fabric-ca-server"
            - name: FABRIC_CA_SERVER_TLS_CERTFILE
              value: "/var/hyperledger/fabric/config/tls/tls.crt"
            - name: FABRIC_CA_SERVER_TLS_KEYFILE
              value: "/var/hyperledger/fabric/config/tls/tls.key"
            - name: FABRIC_CA_CLIENT_HOME
              value: "/var/hyperledger/fabric-ca-client"
          ports:
            - containerPort: 443
          volumeMounts:
            - name: fabric-volume
              mountPath: /var/hyperledger
            - name: fabric-config
              mountPath: /var/hyperledger/fabric-ca-server/fabric-ca-server-config.yaml
              subPath: fabric-ca-server-config.yaml
            - name: tls-cert-volume
              mountPath: /var/hyperledger/fabric/config/tls
              readOnly: true
          readinessProbe:
            tcpSocket:
              port: 443
            initialDelaySeconds: 2
            periodSeconds: 5
      volumes:
        - name: fabric-volume
          persistentVolumeClaim:
            claimName: fabric-org0
        - name: fabric-config
          configMap:
            name: org0-config
        - name: tls-cert-volume
          secret:
            secretName: org0-ca-tls-cert
---
apiVersion: v1
kind: Service
metadata:
  name: org0-ca
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
  selector:
    app: org0-ca
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
  labels:
    app: org0-ca
  name: org0-ca
spec:
  ingressClassName: nginx
  rules:
    - host: org0-ca.${DOMAIN}
      http:
        paths:
          - backend:
              service:
                name: org0-ca
                port:
                  name: https
            path: /
            pathType: ImplementationSpecific
  tls:
    - hosts:
        - org0-ca.${DOMAIN}
# apply to $ORG0~2_NS
kubectl -n $ORG0_NS rollout status deploy/org0-ca
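Once the rollout finishes, the CA should answer its /cainfo endpoint through the ingress; a smoke-test sketch reusing the DOMAIN and NGINX_HTTPS_PORT variables from the enroll step below (-k because the chain is rooted in the self-signed issuer):
# Hit the Fabric CA info endpoint through the nginx ingress (TLS verification skipped)
curl -sk https://org0-ca.${DOMAIN}:${NGINX_HTTPS_PORT}/cainfo | jq .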
enrolling bootstrap Fabric CA users (org0 - orderer, org1 - peer, org2 - peer)
kubectl -n $ns get secret ${CA_NAME}-tls-cert -o json \
  | jq -r .data.\"ca.crt\" \
  | base64 -d \
  > ${CA_DIR}/tlsca-cert.pem

# Enroll the root CA user
fabric-ca-client enroll \
  --url https://${RCAADMIN_USER}:${RCAADMIN_PASS}@${CA_NAME}.${DOMAIN}:${NGINX_HTTPS_PORT} \
  --tls.certfiles $TEMP_DIR/cas/${CA_NAME}/tlsca-cert.pem \
  --mspdir $TEMP_DIR/enrollments/${org}/users/${RCAADMIN_USER}/msp
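Node identities are normally registered with the org CA before the in-pod enrollment below; a sketch using the standard fabric-ca-client register command (the id name, secret, and type here are illustrative):
# Register a node identity with the org CA as the rcaadmin enrolled above
# (id.name / id.secret are placeholders)
fabric-ca-client register \
  --url https://${CA_NAME}.${DOMAIN}:${NGINX_HTTPS_PORT} \
  --id.name org0-orderer1 \
  --id.secret ordererpw \
  --id.type orderer \
  --tls.certfiles $TEMP_DIR/cas/${CA_NAME}/tlsca-cert.pem \
  --mspdir $TEMP_DIR/enrollments/${org}/users/${RCAADMIN_USER}/msp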
cat <<EOF | kubectl -n ${ns} exec deploy/${ca_name} -i -- /bin/sh
set -x
export FABRIC_CA_CLIENT_HOME=/var/hyperledger/fabric-ca-client
export FABRIC_CA_CLIENT_TLS_CERTFILES=/var/hyperledger/fabric/config/tls/ca.crt
fabric-ca-client enroll \
  --url https://${id_name}:${id_secret}@${ca_name} \
  --csr.hosts ${csr_hosts} \
  --mspdir /var/hyperledger/fabric/organizations/${node_type}Organizations/${org}.example.com/${node_type}s/${id_name}.${org}.example.com/msp

# Create local MSP config.yaml
echo "NodeOUs:
  Enable: true
  ClientOUIdentifier:
    Certificate: cacerts/${org}-ca.pem
    OrganizationalUnitIdentifier: client
  PeerOUIdentifier:
    Certificate: cacerts/${org}-ca.pem
    OrganizationalUnitIdentifier: peer
  AdminOUIdentifier:
    Certificate: cacerts/${org}-ca.pem
    OrganizationalUnitIdentifier: admin
  OrdererOUIdentifier:
    Certificate: cacerts/${org}-ca.pem
    OrganizationalUnitIdentifier: orderer" > /var/hyperledger/fabric/organizations/${node_type}Organizations/${org}.example.com/${node_type}s/${id_name}.${org}.example.com/msp/config.yaml
EOF
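To confirm the enrollment landed in the expected location, the MSP directory can be listed inside the CA pod (sketch, same path as the --mspdir above):
# List the MSP directory that the in-pod enrollment should have populated
kubectl -n ${ns} exec deploy/${ca_name} -- \
  ls /var/hyperledger/fabric/organizations/${node_type}Organizations/${org}.example.com/${node_type}s/${id_name}.${org}.example.com/msp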
launching orderer
# apply orderer1~3.yaml
apply_template kube/org0/org0-orderer1.yaml $ORG0_NS
kubectl -n $ORG0_NS rollout status deploy/org0-orderer1
# org0-orderer1.yaml
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: org0-orderer1-tls-cert
  namespace: ${ORG0_NS}
spec:
  isCA: false
  privateKey:
    algorithm: ECDSA
    size: 256
  dnsNames:
    - localhost
    - org0-orderer1
    - org0-orderer1.${ORG0_NS}.svc.cluster.local
    - org0-orderer1.${DOMAIN}
    - org0-orderer1-admin.${DOMAIN}
  ipAddresses:
    - 127.0.0.1
  secretName: org0-orderer1-tls-cert
  issuerRef:
    name: org0-tls-cert-issuer
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: org0-orderer1-env
data:
  FABRIC_CFG_PATH: /var/hyperledger/fabric/config
  FABRIC_LOGGING_SPEC: INFO  # debug:cauthdsl,policies,msp,common.configtx,common.channelconfig=info
  ORDERER_GENERAL_LISTENADDRESS: 0.0.0.0
  ORDERER_GENERAL_LISTENPORT: "6050"
  ORDERER_GENERAL_LOCALMSPID: OrdererMSP
  ORDERER_GENERAL_LOCALMSPDIR: /var/hyperledger/fabric/organizations/ordererOrganizations/org0.example.com/orderers/org0-orderer1.org0.example.com/msp
  ORDERER_GENERAL_TLS_ENABLED: "true"
  ORDERER_GENERAL_TLS_CERTIFICATE: /var/hyperledger/fabric/config/tls/tls.crt
  ORDERER_GENERAL_TLS_ROOTCAS: /var/hyperledger/fabric/config/tls/ca.crt
  ORDERER_GENERAL_TLS_PRIVATEKEY: /var/hyperledger/fabric/config/tls/tls.key
  ORDERER_GENERAL_BOOTSTRAPMETHOD: none
  ORDERER_ADMIN_TLS_ENABLED: "true"
  ORDERER_ADMIN_TLS_CERTIFICATE: /var/hyperledger/fabric/config/tls/tls.crt
  ORDERER_ADMIN_TLS_ROOTCAS: /var/hyperledger/fabric/config/tls/ca.crt
  ORDERER_ADMIN_TLS_PRIVATEKEY: /var/hyperledger/fabric/config/tls/tls.key
  # Authenticate client connections with the org's ecert / admin user enrollments
  ORDERER_ADMIN_TLS_CLIENTROOTCAS: "[/var/hyperledger/fabric/organizations/ordererOrganizations/org0.example.com/orderers/org0-orderer1.org0.example.com/msp/cacerts/org0-ca.pem]"
  ORDERER_FILELEDGER_LOCATION: /var/hyperledger/fabric/data/orderer1
  ORDERER_CONSENSUS_WALDIR: /var/hyperledger/fabric/data/orderer1/etcdraft/wal
  ORDERER_CONSENSUS_SNAPDIR: /var/hyperledger/fabric/data/orderer1/etcdraft/wal
  ORDERER_OPERATIONS_LISTENADDRESS: 0.0.0.0:8443
  ORDERER_ADMIN_LISTENADDRESS: 0.0.0.0:9443
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: org0-orderer1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: org0-orderer1
  template:
    metadata:
      labels:
        app: org0-orderer1
    spec:
      containers:
        - name: main
          image: ${FABRIC_CONTAINER_REGISTRY}/fabric-orderer:${FABRIC_VERSION}
          imagePullPolicy: IfNotPresent
          envFrom:
            - configMapRef:
                name: org0-orderer1-env
          ports:
            - containerPort: 6050
            - containerPort: 8443
            - containerPort: 9443
          volumeMounts:
            - name: fabric-volume
              mountPath: /var/hyperledger
            - name: fabric-config
              mountPath: /var/hyperledger/fabric/config
            - name: tls-cert-volume
              mountPath: /var/hyperledger/fabric/config/tls
              readOnly: true
      volumes:
        - name: fabric-volume
          persistentVolumeClaim:
            claimName: fabric-org0
        - name: fabric-config
          configMap:
            name: org0-config
        - name: tls-cert-volume
          secret:
            secretName: org0-orderer1-tls-cert
---
apiVersion: v1
kind: Service
metadata:
  name: org0-orderer1
spec:
  ports:
    - name: general
      port: 6050
      protocol: TCP
    - name: operations
      port: 8443
      protocol: TCP
    - name: admin
      port: 9443
      protocol: TCP
  selector:
    app: org0-orderer1
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
  labels:
    app: org0-orderer1
  name: org0-orderer1
spec:
  ingressClassName: nginx
  rules:
    - host: org0-orderer1.${DOMAIN}
      http:
        paths:
          - backend:
              service:
                name: org0-orderer1
                port:
                  name: general
            path: /
            pathType: ImplementationSpecific
    - host: org0-orderer1-admin.${DOMAIN}
      http:
        paths:
          - backend:
              service:
                name: org0-orderer1
                port:
                  name: admin
            path: /
            pathType: ImplementationSpecific
  tls:
    - hosts:
        - org0-orderer1.${DOMAIN}
    - hosts:
        - org0-orderer1-admin.${DOMAIN}
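With ORDERER_OPERATIONS_LISTENADDRESS set to 0.0.0.0:8443 and no operations TLS configured in the ConfigMap above, a basic health check can go through a port-forward (sketch):
# Forward the operations listener and hit the orderer's health endpoint
kubectl -n $ORG0_NS port-forward deploy/org0-orderer1 8443:8443 &
PF_PID=$!
sleep 2
curl -s http://localhost:8443/healthz
kill $PF_PID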
launching peer
# apply org1~2-peer1~2.yaml
apply_template kube/org1/org1-peer1.yaml $ORG1_NS
kubectl -n $ORG1_NS rollout status deploy/org1-peer1
# org1-peer1.yaml
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: org1-peer1-tls-cert
  namespace: ${ORG1_NS}
spec:
  isCA: false
  privateKey:
    algorithm: ECDSA
    size: 256
  dnsNames:
    - localhost
    - org1-peer1
    - org1-peer1.${ORG1_NS}.svc.cluster.local
    - org1-peer1.${DOMAIN}
    - org1-peer-gateway-svc
    - org1-peer-gateway-svc.${DOMAIN}
  ipAddresses:
    - 127.0.0.1
  secretName: org1-peer1-tls-cert
  issuerRef:
    name: org1-tls-cert-issuer
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: org1-peer1-config
data:
  FABRIC_CFG_PATH: /var/hyperledger/fabric/config
  FABRIC_LOGGING_SPEC: "debug:cauthdsl,policies,msp,grpc,peer.gossip.mcs,gossip,leveldbhelper=info"
  CORE_PEER_TLS_ENABLED: "true"
  CORE_PEER_TLS_CERT_FILE: /var/hyperledger/fabric/config/tls/tls.crt
  CORE_PEER_TLS_KEY_FILE: /var/hyperledger/fabric/config/tls/tls.key
  CORE_PEER_TLS_ROOTCERT_FILE: /var/hyperledger/fabric/config/tls/ca.crt
  CORE_PEER_ID: org1-peer1.org1.example.com
  CORE_PEER_ADDRESS: org1-peer1:7051
  CORE_PEER_LISTENADDRESS: 0.0.0.0:7051
  CORE_PEER_CHAINCODEADDRESS: org1-peer1:7052
  CORE_PEER_CHAINCODELISTENADDRESS: 0.0.0.0:7052
  # bootstrap peer is the other peer in the same org
  CORE_PEER_GOSSIP_BOOTSTRAP: org1-peer2:7051
  CORE_PEER_GOSSIP_EXTERNALENDPOINT: org1-peer1.${ORG1_NS}.svc.cluster.local:7051
  CORE_PEER_LOCALMSPID: Org1MSP
  CORE_PEER_MSPCONFIGPATH: /var/hyperledger/fabric/organizations/peerOrganizations/org1.example.com/peers/org1-peer1.org1.example.com/msp
  CORE_OPERATIONS_LISTENADDRESS: 0.0.0.0:9443
  CORE_PEER_FILESYSTEMPATH: /var/hyperledger/fabric/data/org1-peer1.org1.example.com
  CORE_LEDGER_SNAPSHOTS_ROOTDIR: /var/hyperledger/fabric/data/org1-peer1.org1.example.com/snapshots
  CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG: "{\"peername\":\"org1peer1\"}"
  CORE_LEDGER_STATE_STATEDATABASE: CouchDB
  CORE_LEDGER_STATE_COUCHDBCONFIG_MAXRETRIESONSTARTUP: "20"
  CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS: localhost:5984
  CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME: admin
  CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD: adminpw
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: org1-peer1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: org1-peer1
  template:
    metadata:
      labels:
        app: org1-peer1
        org: org1
    spec:
      containers:
        - name: main
          image: ${FABRIC_PEER_IMAGE}
          imagePullPolicy: IfNotPresent
          envFrom:
            - configMapRef:
                name: org1-peer1-config
          ports:
            - containerPort: 7051
            - containerPort: 7052
            - containerPort: 9443
          volumeMounts:
            - name: fabric-volume
              mountPath: /var/hyperledger
            - name: fabric-config
              mountPath: /var/hyperledger/fabric/config
            - name: tls-cert-volume
              mountPath: /var/hyperledger/fabric/config/tls
              readOnly: true
        - name: couchdb
          image: couchdb:3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: "COUCHDB_USER"
              value: "admin"
            - name: "COUCHDB_PASSWORD"
              value: "adminpw"
          ports:
            - containerPort: 5984
      volumes:
        - name: fabric-volume
          persistentVolumeClaim:
            claimName: fabric-org1
        - name: fabric-config
          configMap:
            name: org1-config
        - name: tls-cert-volume
          secret:
            secretName: org1-peer1-tls-cert
---
apiVersion: v1
kind: Service
metadata:
  name: org1-peer1
spec:
  ports:
    - name: grpc
      port: 7051
      protocol: TCP
    - name: chaincode
      port: 7052
      protocol: TCP
    - name: operations
      port: 9443
      protocol: TCP
  selector:
    app: org1-peer1
---
apiVersion: v1
kind: Service
metadata:
  name: org1-peer-gateway-svc
spec:
  ports:
    - name: grpc
      port: 7051
      protocol: TCP
  selector:
    org: org1
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
  labels:
    app: org1-peer1
  name: org1-peer1
spec:
  ingressClassName: nginx
  rules:
    - host: org1-peer1.${DOMAIN}
      http:
        paths:
          - backend:
              service:
                name: org1-peer1
                port:
                  name: grpc
            path: /
            pathType: ImplementationSpecific
    - host: org1-peer-gateway-svc.${DOMAIN}
      http:
        paths:
          - backend:
              service:
                name: org1-peer1
                port:
                  name: grpc
            path: /
            pathType: ImplementationSpecific
  tls:
    - hosts:
        - org1-peer1.${DOMAIN}
    - hosts:
        - org1-peer-gateway-svc.${DOMAIN}
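The peer exposes its operations endpoint on 9443 (CORE_OPERATIONS_LISTENADDRESS above, with no operations TLS configured here), so a similar health check applies (sketch):
# Forward the peer's operations listener and hit its health endpoint
kubectl -n $ORG1_NS port-forward deploy/org1-peer1 9443:9443 &
PF_PID=$!
sleep 2
curl -s http://localhost:9443/healthz
kill $PF_PID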