#!/bin/bash
# Query the Kubernetes API server directly with curl, using the server URL
# and bearer token extracted from the current kubeconfig.
#
# Outputs: prints APISERVER/TOKEN, then the verbose curl responses for
#          /api/v1, /api/v1/nodes and /api/v1/pods to stdout/stderr.
set -euo pipefail

# NOTE(review): grep/cut parsing assumes a single-cluster, single-user
# kubeconfig with a plain "token:" entry — confirm; for multi-context
# configs `kubectl config view --minify -o jsonpath=...` is more robust.
APISERVER=$(kubectl config view | grep server | cut -f 2- -d ":" | tr -d " ")
TOKEN=$(kubectl config view | grep token | cut -f 2- -d ":" | tr -d " ")

echo "APISERVER=${APISERVER}"
echo "TOKEN=${TOKEN}"

# -k skips TLS verification (self-signed API server cert);
# quote all expansions so the URL and header survive any whitespace.
curl -v -k -H "Authorization: Bearer ${TOKEN}" "${APISERVER}/api/v1"
curl -v -k -H "Authorization: Bearer ${TOKEN}" "${APISERVER}/api/v1/nodes"
curl -v -k -H "Authorization: Bearer ${TOKEN}" "${APISERVER}/api/v1/pods"
분류 전체보기
- How to access k8s APIs 2018.05.02
- GlusterFS Setup on CentOS 7 Clusters 2018.04.29
- Loopback device 2018.04.24
- HTTP Proxy by Squid on CentOS 7 2018.03.27
- Tip - How to create a file with multiple lines on a Dockerfile 2018.03.14
- Installation of Hadoop 2.7.5 on CentOS 7 Cluster 2018.03.09
- How to check # of CPU on Mac 2018.03.09
- How to list up all users 2018.03.09
- How to download Oracle JDK using CLI 2018.03.09
- How to check default heap size of JVM 2018.03.09
How to access k8s APIs
GlusterFS Setup on CentOS 7 Clusters
Prerequisites
Servers
Hostname |
IP Address |
ebdp-po-dkr10d.sys.comcast.net |
147.191.72.175 |
ebdp-po-dkr11d.sys.comcast.net |
147.191.72.176 |
ebdp-po-dkr12d.sys.comcast.net |
147.191.74.184 |
Installation
# rpm -qa | grep gluster glusterfs-3.8.15-2.el7.x86_64 glusterfs-server-3.8.15-2.el7.x86_64 glusterfs-libs-3.8.15-2.el7.x86_64 glusterfs-api-3.8.15-2.el7.x86_64 glusterfs-cli-3.8.15-2.el7.x86_64 glusterfs-client-xlators-3.8.15-2.el7.x86_64 glusterfs-fuse-3.8.15-2.el7.x86_64 | cs |
Other Settings
Setup
Prepare Partitions
Since the VMs cannot be given additional partitions, loopback devices are used instead.
# losetup -l NAME SIZELIMIT OFFSET AUTOCLEAR RO BACK-FILE /dev/loop101 0 0 0 0 /app/work/glusterfs/disks/001.img /dev/loop102 0 0 0 0 /app/work/glusterfs/disks/002.img /dev/loop103 0 0 0 0 /app/work/glusterfs/disks/003.img | cs |
Mount Partitions
# df -h Filesystem Size Used Avail Use% Mounted on .... /dev/loop101 20G 33M 20G 1% /app/work/glusterfs/bricks/brick1 /dev/loop102 20G 33M 20G 1% /app/work/glusterfs/bricks/brick2 /dev/loop103 20G 33M 20G 1% /app/work/glusterfs/bricks/brick3 | cs |
Connect
# gluster peer probe ebdp-po-dkr11d.sys.comcast.net peer probe: success. # gluster peer probe ebdp-po-dkr12d.sys.comcast.net peer probe: success. # gluster peer status Number of Peers: 2 Hostname: ebdp-po-dkr11d.sys.comcast.net Uuid: 868b4330-5667-46ba-9dad-ec4181b4c623 State: Peer in Cluster (Connected) Hostname: ebdp-po-dkr12d.sys.comcast.net Uuid: 55c36364-0d44-4359-9e58-a23f5b89c79e State: Peer in Cluster (Connected) | cs |
Create Volume
# gluster volume create gluster-volume-001 \ replica 3 \ transport tcp \ ebdp-po-dkr10d.sys.comcast.net:/app/work/glusterfs/bricks/brick1/brick \ ebdp-po-dkr11d.sys.comcast.net:/app/work/glusterfs/bricks/brick1/brick \ ebdp-po-dkr12d.sys.comcast.net:/app/work/glusterfs/bricks/brick1/brick volume create: gluster-volume-001: success: please start the volume to access data # gluster volume start gluster-volume-001 volume start: gluster-volume-001: success # gluster volume status Status of volume: gluster-volume-001 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick ebdp-po-dkr10d.sys.comcast.net:/app/w ork/glusterfs/bricks/brick1/brick 49152 0 Y 4169 Brick ebdp-po-dkr11d.sys.comcast.net:/app/w ork/glusterfs/bricks/brick1/brick 49152 0 Y 4657 Brick ebdp-po-dkr12d.sys.comcast.net:/app/w ork/glusterfs/bricks/brick1/brick 49152 0 Y 4588 Self-heal Daemon on localhost N/A N/A Y 4189 Self-heal Daemon on ebdp-po-dkr12d.sys.comc ast.net N/A N/A Y 4611 Self-heal Daemon on ebdp-po-dkr11d.sys.comc ast.net N/A N/A Y 4678 Task Status of Volume gluster-volume-001 ------------------------------------------------------------------------------ There are no active volume tasks # gluster volume info all Volume Name: gluster-volume-001 Type: Replicate Volume ID: 5f00bb5a-b977-4cad-8afe-df4abfbd1f35 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: ebdp-po-dkr10d.sys.comcast.net:/app/work/glusterfs/bricks/brick1/brick Brick2: ebdp-po-dkr11d.sys.comcast.net:/app/work/glusterfs/bricks/brick1/brick Brick3: ebdp-po-dkr12d.sys.comcast.net:/app/work/glusterfs/bricks/brick1/brick Options Reconfigured: transport.address-family: inet performance.readdir-ahead: on nfs.disable: onColored by Color Scripter | cs |
Mount GlusterFS Volume from another server
# rpm -qa | grep gluster glusterfs-client-xlators-3.8.4-18.4.el7.centos.x86_64 glusterfs-fuse-3.8.4-18.4.el7.centos.x86_64 glusterfs-libs-3.8.4-18.4.el7.centos.x86_64 glusterfs-3.8.4-18.4.el7.centos.x86_64 | cs |
Then, the glusterfs volume can be mounted as below.
# mount -t glusterfs ebdp-po-dkr10d.sys.comcast.net:/gluster-volume-001 /mnt/gluster-volume/ | cs |
Once a file is created in the mounted directory, it appears in the original brick directory on each of the 3 VMs.
Tear Down
# gluster volume stop gluster-volume-001 Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y volume stop: gluster-volume-001: success # gluster volume delete gluster-volume-001 Deleting volume will erase all information about the volume. Do you want to continue? (y/n) y volume delete: gluster-volume-001: success # gluster peer detach ebdp-po-dkr11d.sys.comcast.net peer detach: success # gluster peer detach ebdp-po-dkr12d.sys.comcast.net peer detach: success # gluster peer status Number of Peers: 0 | cs |
Loopback device
Create an image file
# dd if=/dev/zero of=./disk1.img bs=1M count=5120 5120+0 records in 5120+0 records out 5368709120 bytes (5.4 GB) copied, 222.903 s, 24.1 MB/s | cs |
Attach the image file as a loopback device
# losetup -f /app/work/glusterfs/disk/disk1.img # losetup -l NAME SIZELIMIT OFFSET AUTOCLEAR RO BACK-FILE /dev/loop0 0 0 0 0 /app/work/glusterfs/disk/disk1.img | cs |
Format
# mkfs.xfs /dev/loop0 meta-data=/dev/loop0 isize=512 agcount=4, agsize=327680 blks = sectsz=512 attr=2, projid32bit=1 = crc=1 finobt=0, sparse=0 data = bsize=4096 blocks=1310720, imaxpct=25 = sunit=0 swidth=0 blks naming =version 2 bsize=4096 ascii-ci=0 ftype=1 log =internal log bsize=4096 blocks=2560, version=2 = sectsz=512 sunit=0 blks, lazy-count=1 realtime =none extsz=4096 blocks=0, rtextents=0 | cs |
Mount
# mkdir /mnt/disk1 # mount /dev/loop0 /mnt/disk1 | cs |
Unmount & Detach
# umount /mnt/disk1 # losetup -d /dev/loop0 | cs |
'OS - Linux' 카테고리의 다른 글
How to list up all users (0) | 2018.03.09 |
---|
HTTP Proxy by Squid on CentOS 7
Installation
yum install -y squid | cs |
Configuration
visible_hostname localhost acl all src 0.0.0.0/0.0.0.0 http_access allow all http_port 3128 | cs |
You need to restart the squid service after changing the configuration.
systemctl restart squid.service | cs |
Configuration on Client
export http_proxy=http://$HTTP_PROXY_HOSTNAME:3128 export https_proxy=http://$HTTP_PROXY_HOSTNAME:3128 | cs |
Test
# curl -v http://www.google.com * About to connect() to www.google.com port 80 (#0) * Trying 172.217.3.228... | cs |
After the proxy configuration, the result should be as following.
# curl -v http://www.google.com * About to connect() to proxy ebdp-po-dkr10d.sys.comcast.net port 3128 (#0) * Trying 147.191.72.175... * Connected to ebdp-po-dkr10d.sys.comcast.net (147.191.72.175) port 3128 (#0) > GET http://www.google.com/ HTTP/1.1 > User-Agent: curl/7.29.0 > Host: www.google.com > Accept: */* > Proxy-Connection: Keep-Alive > < HTTP/1.1 200 OK .... | cs |
Tip - How to create a file with multiple lines on a Dockerfile
A here-document (with an EOF delimiter) can be used to create a file with multiple lines in a shell script, as follows.
cat << EOF > /tmp/yourfilehere These contents will be written to the file. This line is indented. EOF | cs |
https://stackoverflow.com/questions/2953081/how-can-i-write-a-heredoc-to-a-file-in-bash-script
However, this cannot be applied to Dockerfile.
Instead, the following approach can be used to achieve a similar result.
RUN echo $'[user]\n\ email = bumjoon_kim@comcast.com\n\ name = Bumjoon Kim\n[push]\n\ default = current\n' >> /root/.gitconfig | cs |
Installation of Hadoop 2.7.5 on CentOS 7 Cluster
Prerequisites
Servers
Hostname |
IP Address |
Type |
ebdp-po-dkr10d.sys.comcast.net |
147.191.72.175 |
Master |
ebdp-po-dkr11d.sys.comcast.net |
147.191.72.176 |
Slave |
ebdp-po-dkr12d.sys.comcast.net |
147.191.74.184 |
Slave |
JDK 1.8
# echo $JAVA_HOME /usr/java/jdk1.8.0_131 | cs |
User for Hadoop
Passwordless SSH
# su - hduser # ssh-keygen -t dsa -P "" -f ~/.ssh/id_dsa | cs |
Then, the public key of each node needs to be registered in authorized_keys of other nodes (including itself).
Here is the example of ~/.ssh/authorized_keys.
ssh-dss AAAA...HD3no= hduser@ebdp-po-dkr10d.sys.comcast.net ssh-dss AAAA...YBnYs= hduser@ebdp-po-dkr11d.sys.comcast.net ssh-dss AAAA...mREIg== hduser@ebdp-po-dkr12d.sys.comcast.net | cs |
The permission should be changed as following.
# chmod go-w $HOME $HOME/.ssh # chmod 600 $HOME/.ssh/authorized_keys # chown hduser $HOME/.ssh/authorized_keys | cs |
Finally, to access other nodes with a shortcut, ~/.ssh/config should contain the following modifications.
(Note that "localhost" should be adjusted according to its hostname.)
Host dk10 HostName ebdp-po-dkr10d.sys.comcast.net User hduser Host dk11 HostName ebdp-po-dkr11d.sys.comcast.net User hduser Host dk12 HostName ebdp-po-dkr12d.sys.comcast.net User hduser Host localhost HostName ebdp-po-dkr10d.sys.comcast.net User hduser | cs |
Installation
Download
Untar the tarball at a directory
Environment Variables
export JAVA_HOME=/usr/java/jdk1.8.0_131 export HADOOP_HOME=/app/bigdata/hadoop export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop export PATH=$PATH:$HADOOP_HOME/bin export HDFS_NAMENODE_USER="hduser" export HDFS_DATANODE_USER="hduser" export HDFS_SECONDARYNAMENODE_USER="hduser" export YARN_RESOURCEMANAGER_USER="hduser" export YARN_NODEMANAGER_USER="hduser" | cs |
Configurations
Masters and Slaves
These files simply list the master and slave nodes.
# echo "ebdp-po-dkr10d.sys.comcast.net" >> $HADOOP_CONF_DIR/masters # echo "ebdp-po-dkr11d.sys.comcast.net" >> $HADOOP_CONF_DIR/slaves # echo "ebdp-po-dkr12d.sys.comcast.net" >> $HADOOP_CONF_DIR/slaves | cs |
core-site.xml
<configuration> <property> <name>fs.defaultFS</name> <value>hdfs://ebdp-po-dkr10d.sys.comcast.net:54310</value> <description>The name of the default file system.</description> </property> <property> <name>hadoop.tmp.dir</name> <value>/app/hadoop/hadoop/tmp</value> <description>A base for other temporary directories.</description> </property> </configuration> | cs |
hdfs-site.xml
<configuration> <property> <name>dfs.replication</name> <value>2</value> <description>Default block replication</description> </property> <property> <name>dfs.namenode.name.dir</name> <value>/app/bigdata/hadoop/namedir</value> <final>true</final> </property> <property> <name>dfs.datanode.data.dir</name> <value>/app/bigdata/hadoop/datadir</value> <final>true</final> </property> <property> <name>dfs.permissions</name> <value>false</value> </property> <property> <name>dfs.namenode.secondary.http-address</name> <value>ebdp-po-dkr10d.sys.comcast.net:50090</value> </property> </configuration> | cs |
mapred-site.xml
<configuration> <property> <name>mapred.job.tracker</name> <value>ebdp-po-dkr10d.sys.comcast.net:54311</value> <description>Map Reduce jobtracker</description> </property> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> <property> <name>mapred.local.dir</name> <value>/app/bigdata/hadoop/mapred-localdir</value> </property> <property> <name>mapred.system.dir</name> <value>/app/bigdata/hadoop/mapred-systemdir</value> </property> </configuration> | cs |
yarn-site.xml
<configuration> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name> <value>org.apache.hadoop.mapred.ShuffleHandler</value> </property> <property> <name>yarn.resourcemanager.resource-tracker.address</name> <value>ebdp-po-dkr10d.sys.comcast.net:8025</value> </property> <property> <name>yarn.resourcemanager.scheduler.address</name> <value>ebdp-po-dkr10d.sys.comcast.net:8030</value> </property> <property> <name>yarn.resourcemanager.address</name> <value>ebdp-po-dkr10d.sys.comcast.net:8035</value> </property> </configuration> | cs |
Format Namenode
# hadoop namenode -format | cs |
Launch Hadoop Daemons
# cd $HADOOP_HOME # ./sbin/start-dfs.sh .... # ./sbin/start-yarn.sh .... | cs |
Note that "$HADOOP_HOME/sbin/start-all.sh" is equivalent to the 2 commands above but it is deprecated.
Verification
Main Webpage
JPS
# jps 22480 NameNode 23558 Jps 22874 ResourceManager 22700 SecondaryNameNode | cs |
On slave,
# jps 12448 DataNode 12811 Jps 12590 NodeManager | cs |
Hadoop Command
# hadoop fs -df -h Filesystem Size Used Available Use% hdfs://hadoop-master:54310 2.0 T 8 K 1.9 T 0% | cs |
How to check # of CPU on Mac
macOS does not provide the proc file system (/proc), since it is BSD-based rather than Linux-based.
To check the number of CPU, the following commands can be used.
sysctl -n hw.ncpu | cs |
This is equivalent to the logical CPU numbers as following.
sysctl -n hw.logicalcpu | cs |
The number of physical CPU can be retrieved with the following command.
sysctl -n hw.physicalcpu | cs |
'OS - Mac' 카테고리의 다른 글
How to make my Mac remember my SSH passphrase (0) | 2018.03.01 |
---|
How to list up all users
'OS - Linux' 카테고리의 다른 글
Loopback device (0) | 2018.04.24 |
---|
How to download Oracle JDK using CLI
You need to add a header of "Cookie: oraclelicense=accept-securebackup-cookie" when using curl.
Here is the example of download of JDK 1.8 u131.
curl -LO -H "Cookie: oraclelicense=accept-securebackup-cookie" \ http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.rpm | cs |
'Programming Language > Java' 카테고리의 다른 글
How to check default heap size of JVM (0) | 2018.03.09 |
---|---|
Spring Boot on Java 9 (0) | 2018.03.02 |
How to download Oracle JDK from CLI (0) | 2018.03.01 |
How to check default heap size of JVM (0) | 2018.03.01 |
CPU Load Generator (0) | 2018.03.01 |
How to check default heap size of JVM
https://stackoverflow.com/questions/4667483/how-is-the-default-java-heap-size-determined
java -XX:+PrintFlagsFinal -version | grep HeapSize | cs |
'Programming Language > Java' 카테고리의 다른 글
How to download Oracle JDK using CLI (0) | 2018.03.09 |
---|---|
Spring Boot on Java 9 (0) | 2018.03.02 |
How to download Oracle JDK from CLI (0) | 2018.03.01 |
How to check default heap size of JVM (0) | 2018.03.01 |
CPU Load Generator (0) | 2018.03.01 |