分类:服务器技术

服务器

一看必会系列:docker 练习 5创建mysql容器

No Comments Docker

 

 

$ docker run --name jeffmysql001 \
-v /opt/dockerfile/mysql_5.7/data:/var/lib/mysql \
-v /opt/dockerfile/mysql_5.7/conf:/etc/mysql/conf.d \
-e MYSQL_ROOT_PASSWORD=xxx \
-P -d reg.ccie.wang/library/mysql:5.7
 
 
root@docker:/opt/dockerfile/mysql_5.7# docker ps
CONTAINER ID        IMAGE                             COMMAND                  CREATED             STATUS              PORTS                                               NAMES
ee025d911e4c        reg.ccie.wang/library/mysql:5.7   "docker-entrypoint.s…"   23 seconds ago      Up 21 seconds       0.0.0.0:32802->3306/tcp, 0.0.0.0:32801->33060/tcp   jeffmysql001

连接测试
mysql -h 127.0.0.1 -P 32802 -u root -p
 
用同主机的容器进行测试
root@docker:~# docker run -it busybox

出现乱码即为正常
/ # telnet 192.169.110.3 3306
J
5.7.25|D&R%(iKm[18V“OfCmysql_native_passwordConnection closed by foreign host

------中间广告---------

 
执行命令,有输出即为正常
show PROCESSLIST

修改默认信息
$ docker run -it --link some-mysql:mysql --rm mysql sh -c 'exec mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -p"$MYSQL_ENV_MYSQL_ROOT_PASSWORD"'

当客户端使用,端口要确认不要搞错了
# docker run -it --rm reg.ccie.wang/library/mysql:5.7 mysql -h"192.168.10.67" -uroot -p -P 32804

Server version: 5.7.25 MySQL Community Server (GPL)

mysql> show processlist
    -> ;
+—-+——+———————+——+———+——+———-+——————+
| Id | User | Host                | db   | Command | Time | State    | Info             |
+—-+——+———————+——+———+——+———-+——————+
|  3 | root | 192.169.110.1:37030 | NULL | Query   |    0 | starting | show processlist |
+—-+——+———————+——+———+——+———-+——————+
1 row in set (0.00 sec)

https://hub.docker.com/_/mysql

一看必会系列:dockerfile 练习4 jenkins docker创建

No Comments Docker

mkdir -p /opt/dockerfile/centos_jenkins

docker pull  jenkins:2.60.3

docker run -p 8080:8080 -p 50000:50000 -v /opt/dockerfile/centos_jenkins:/var/jenkins_home jenkins:2.60.3

# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED              STATUS            
f15b3dea0bc7        jenkins:2.60.3      "/bin/tini — /usr/l…"   About a minute ago   Up About a minute
PORTS                                               NAMES
0.0.0.0:32798->8080/tcp, 0.0.0.0:32797->50000/tcp   suspicious_tharp

进入容器读取信息
root@docker:/opt/dockerfile/centos_jenkins# docker exec -it f15b3dea0bc7 /bin/bash
jenkins@f15b3dea0bc7:/$

访问 http://192.168.10.67:32798/login?from=%2F

读取管理密码
jenkins@f15b3dea0bc7:/$ cat /var/jenkins_home/secrets/initialAdminPassword
a642a5e7ca8b422196f71adb349aa48d

jenkins docker 容器里如何升级、
1。
下载最新包 https://mirrors.tuna.tsinghua.edu.cn/jenkins/war-stable/2.164.1/jenkins.war
2.停止容器
docker stop id
3.换更新包
复制jenkins.war  到容器的jenkins目录
docker cp jenkins.war f15b3dea0bc7:/usr/share/jenkins/jenkins.war
4.启动容器
docker start f15b3dea0bc7
f15b3dea0bc7
root@docker:/opt/dockerfile# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS            
f15b3dea0bc7        jenkins:2.60.3      "/bin/tini — /usr/l…"   31 minutes ago      Up 15 seconds  
PORTS                                               NAMES   
0.0.0.0:32800->8080/tcp, 0.0.0.0:32799->50000/tcp   suspicious_tharp
5.完成

 

报错

root@docker:/opt/dockerfile# docker run -P -v /opt/dockerfile/centos_jenkins:/var/jenkins_home jenkins:2.60.3
touch: cannot touch ‘/var/jenkins_home/copy_reference_file.log’: Permission denied
Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permissions?
root@docker:/opt/dockerfile# ll /opt/dockerfile/centos_jenkins
total 8
drwxr-xr-x 2 jenkins jenkins 4096 3月  15 18:02 ./
drwxr-xr-x 6 root    root    4096 3月  15 18:02 ../
root@docker:/opt/dockerfile#

解决
useradd jenkins 
chown jenkins:jenkins -R centos_jenkins

依然报同样的错。说明权限还是不够
改成
sudo chown -R 1000:1000 /opt/jenkins
修复成功

原因在这里
Dockerfile 里有一段这个  UID GID 为1000.容器USER 和主机USER不能对等所以用 id就可以解决
ARG user=jenkins
ARG group=jenkins
ARG uid=1000
ARG gid=1000

https://hub.docker.com/_/jenkins?tab=description

一看必会系列:jenkins docker 容器升级jenkins版本

No Comments Docker

jenkins docker 容器里如何升级、
1。
下载最新包 https://mirrors.tuna.tsinghua.edu.cn/jenkins/war-stable/2.164.1/jenkins.war
2.停止容器
docker stop id
3.换更新包
复制jenkins.war  到容器的jenkins目录
docker cp jenkins.war f15b3dea0bc7:/usr/share/jenkins/jenkins.war
4.启动容器
docker start f15b3dea0bc7
f15b3dea0bc7
root@docker:/opt/dockerfile# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS            
f15b3dea0bc7        jenkins:2.60.3      "/bin/tini — /usr/l…"   31 minutes ago      Up 15 seconds  
PORTS                                               NAMES   
0.0.0.0:32800->8080/tcp, 0.0.0.0:32799->50000/tcp   suspicious_tharp
5.完成

一看必会系列:dockerfile 练习3 继承方式创建apache镜像

No Comments Docker

 

目录
../ubuntu_apache/
├── dockerfile
├── html
│   └── index.html
└── run.sh

相关文件内容
1.dockerfile
# Inherit from the previously built image, which already contains sshd.
# NOTE: Dockerfile comments must be on their own line; a trailing "#..." after
# FROM is treated as extra arguments and fails the build.
FROM reg.ccie.wang/test/ubuntu:sshd001
MAINTAINER docker_user_jeff
# Suppress interactive apt prompts during the build.
# FIX: the original wrote "ENV DEBIAN FRONTEND noninteractive", which defines a
# variable literally named DEBIAN (visible in the env dump below); apt reads
# DEBIAN_FRONTEND.
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get -yq install apache2&&\
    rm -rf /var/lib/apt/lists/*
# Startup script that launches sshd and apache2 (added below as run.sh).
ADD run.sh /run.sh
RUN chmod 755 /*.sh
# Prepare apache runtime directories and serve /app as the document root.
RUN mkdir -p /var/lock/apache2 &&mkdir -p /app &&\
    rm -fr /var/www/html && \
    ln -s /app /var/www/html &&\
    mkdir -p /var/run/apache2
COPY html/ /app/
ENV APACHE_RUN_USER www-data
ENV APACHE_RUN_GROUP www-data
ENV APACHE_LOG_DIR /var/log/apache2
ENV APACHE_PID_FILE /var/run/apache2.pid
ENV APACHE_RUN_DIR /var/run/apache2
ENV APACHE_SERVERNAME localhost
ENV APACHE_SERVERALIAS docker.localhost
ENV APACHE_DOCUMENTROOT /var/www
ENV APACHE_LOCK_DIR /var/lock/apache2

EXPOSE 80
WORKDIR /app
CMD ["/run.sh"]

2.run.sh
#!/bin/bash
# Start the SSH daemon in the background so the container also accepts SSH.
/usr/sbin/sshd &
# Run apache2 in the foreground via exec so it becomes the container's main
# process (PID 1) and keeps the container alive.
exec apache2 -D FOREGROUND

3. index.html
<h1>docker ubuntu_apache by jeff</h1>

建立镜像
docker build -t apache:ubuntu004 .

查看镜像
docker images |grep 004
apache                            ubuntu004           6197aa3de935        6 minutes ago       280MB

启动并查看 环境是否正确
docker run -it --rm apache:ubuntu004 env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=aad3a9045a75
TERM=xterm
DEBIAN=FRONTEND noninteractive
APACHE_RUN_USER=www-data
APACHE_RUN_GROUP=www-data
APACHE_LOG_DIR=/var/log/apache2
APACHE_PID_FILE=/var/run/apache2.pid
APACHE_RUN_DIR=/var/run/apache2
APACHE_SERVERNAME=localhost
APACHE_SERVERALIAS=docker.localhost
APACHE_DOCUMENTROOT=/var/www
APACHE_LOCK_DIR=/var/lock/apache2
HOME=/root

 

创建容器  并将容器内80端口映射到主机的 8082
# docker run -itd -p 8082:80 -p 18081:22 reg.ccie.wang/test/ubuntu:apache2.4.29 /run.sh
176ae6ea0ebbc1c8707576cece1a0265cf2b52839f1ec37fb9d7b0281a967a2e

# docker ps
CONTAINER ID        IMAGE                                    COMMAND              CREATED             STATUS              PORTS                                         NAMES
176ae6ea0ebb        reg.ccie.wang/test/ubuntu:apache2.4.29   "/run.sh"            4 seconds ago       Up 2 seconds        0.0.0.0:18081->22/tcp, 0.0.0.0:8082->80/tcp   condescending_noyce

回主机测试 8082服务是否正常
# curl localhost:8082
<h1>docker ubuntu_apache by jeff</h1>
#以上结果为正常

查看安装apache2产生的可回收文件

root@89bcf36427d7:/# du -h --max-depth=1 /var/lib/apt/lists/
4.0K    /var/lib/apt/lists/partial
4.0K    /var/lib/apt/lists/auxfiles
24M    /var/lib/apt/lists/

删除减少镜像
root@89bcf36427d7:/# rm -rf /var/lib/apt/lists/*
root@89bcf36427d7:/#
root@89bcf36427d7:/#
root@89bcf36427d7:/# du -h --max-depth=1 /var/lib/apt/lists/
8.0K    /var/lib/apt/lists/
root@89bcf36427d7:/#

报错   docker写的有问题  值 没有传入镜像 已更正

# docker run -it --rm apache:ubuntu002 /run.sh
apache2: Syntax error on line 80 of /etc/apache2/apache2.conf: DefaultRuntimeDir must be a valid directory, absolute or relative to ServerRoot

一看必会系列:dockerfile 练习2 创建apache镜像

No Comments Docker

 

新建目录及文件
/opt/dockerfile/httpd_2.4
├── httpd_2.4
│   ├── dockerfile
│   ├── httpd.conf
│   └── public-html
│       └── index.html

index.html 内容
<h1>jeff dockerfile apache</h1>

dockerfile 内容
# Base image. NOTE: Dockerfile comments must be on their own line; the
# original's trailing "#..." after FROM/COPY is parsed as extra arguments
# and fails the build.
FROM httpd:2.4
# Copy the site content into the container's document root.
COPY ./public-html/ /usr/local/apache2/htdocs/

生成镜像
root@docker:/opt/dockerfile/httpd_2.4# docker build -t httpd_jeff:1.0.0 .
Sending build context to Docker daemon  26.11kB
Step 1/2 : FROM httpd:2.4
—> 2d1e5208483c
Step 2/2 : COPY ./public-html/ /usr/local/apache2/htdocs/
—> 46af319543cf
Successfully built 46af319543cf
Successfully tagged httpd_jeff:1.0.0

验证镜像
root@docker:/opt/dockerfile/httpd_2.4# docker images
REPOSITORY                        TAG                 IMAGE ID            CREATED             SIZE
httpd_jeff                        1.0.0               46af319543cf        8 minutes ago       132MB

运行镜像  -i 交互模式 -t 伪终端 -d 后台运行
root@docker:/opt/dockerfile/httpd_2.4# docker run -itd  --name httpd_jeff -p 8080:80 httpd_jeff:1.0.0
504df67c3fecea806c39338c82f4440e4797b79e9cad6d332da1750ec6e63fc0

root@docker:/opt/dockerfile/httpd_2.4# docker ps
CONTAINER ID        IMAGE               COMMAND              CREATED             STATUS              PORTS                  NAMES
504df67c3fec        httpd_jeff:1.0.0    "httpd-foreground"   5 seconds ago       Up 3 seconds        0.0.0.0:8080->80/tcp   httpd_jeff

验证
root@docker:/opt/dockerfile/httpd_2.4# curl 192.168.10.67:8080
<h1>jeff dockerfile apache</h1>
以上信息为正常

 

报错解决
root@docker:/opt/dockerfile/httpd_2.4# docker run -it --rm --name my-running-app -p 8081:80 apache2.4-images:001
AH00558: httpd: Could not reliably determine the server’s fully qualified domain name, using 192.169.110.10. Set the ‘ServerName’ directive globally to suppress this message
AH00558: httpd: Could not reliably determine the server’s fully qualified domain name, using 192.169.110.10. Set the ‘ServerName’ directive globally to suppress this message
[Thu Mar 14 04:10:23.728775 2019] [mpm_event:notice] [pid 1:tid 139725533614144] AH00489: Apache/2.4.38 (Unix) configured — resuming normal operations
[Thu Mar 14 04:10:23.728982 2019] [core:notice] [pid 1:tid 139725533614144] AH00094: Command line: ‘httpd -D FOREGROUND’
[Thu Mar 14 04:10:23.821551 2019] [mpm_event:notice] [pid 1:tid 139725533614144] AH00492: caught SIGWINCH, shutting down gracefully

修改

root@c76876b5237f:/usr/local/apache2# sed -i "s/#ServerName www.example.com:80/ServerName 0.0.0.0:80/g" /usr/local/apache2/conf/httpd.conf
root@c76876b5237f:/usr/local/apache2# !c
cat conf/httpd.conf |grep Name
# ServerName gives the name and port that the server uses to identify itself.
ServerName 0.0.0.0:80
root@c76876b5237f:/usr/local/apache2#

root@docker:/opt/dockerfile/httpd_2.4# docker run -it –rm –name my-running-app -p 8081:80 apache2.4-images:003[Thu Mar 14 04:12:06.933813 2019] [mpm_event:notice] [pid 1:tid 139627409084480] AH00489: Apache/2.4.38 (Unix) configured — resuming normal operations
[Thu Mar 14 04:12:06.933976 2019] [core:notice] [pid 1:tid 139627409084480] AH00094: Command line: ‘httpd -D FOREGROUND’
[Thu Mar 14 04:12:07.022337 2019] [mpm_event:notice] [pid 1:tid 139627409084480] AH00492: caught SIGWINCH, shutting down gracefully

 

停止所有容器
  docker stop $(docker ps|awk '{print $1}')
删除所有容器
docker rm $(docker ps -a|awk '{print $1}')
删除所有镜像
docker rmi -f $(docker images |grep dockerfile|awk '{print $3}')

 

https://hub.docker.com/_/httpd?tab=description

一看必会系列:dockerfile 练习一 创建支持sshd 的ubuntu镜像

No Comments Docker

dockerfile 创建支持sshd 的ubuntu镜像

先建目录

root@docker:/opt/dockerfile/ubuntu_sshd# !t
tree ../
../
└── ubuntu_sshd
    ├── AZURE-FAT-OPS-PRI
    ├── dockerfile
    └── run.sh

dockfile的内容

root@docker:/opt/dockerfile/ubuntu_sshd# cat dockerfile
FROM reg.ccie.wang/library/ubuntu:18.04
MAINTAINER jeff@jdccie.com
# Switch apt to a local mirror.
RUN sed -i "s/archive.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
RUN apt-get update
# Install the SSH daemon.
RUN apt-get install -y openssh-server
# sshd refuses to start without its privilege-separation directory.
RUN mkdir /var/run/sshd
RUN mkdir /root/.ssh
# Create a login user and set its password.
RUN useradd jeff001
RUN echo jeff001:111111 | chpasswd
# Startup script that launches sshd (added below as run.sh).
ADD run.sh /run.sh
RUN chmod 755 /run.sh
# Document the SSH port. FIX: comments must be on their own line — the
# original "EXPOSE 22   #..." makes docker try to expose the comment text
# as additional ports, which fails the build.
EXPOSE 22

启动执行脚本的网容
root@docker:/opt/dockerfile/ubuntu_sshd# cat run.sh
#!/bin/bash
# Run sshd in the foreground (-D) so it stays the container's main process
# and keeps the container alive.
/usr/sbin/sshd -D

在docker同目录下build
docker build -t sshd:dockerfile004 .

查看生成的镜像
root@docker:/opt/dockerfile/ubuntu_sshd# docker images |grep dockerfile
sshd                              dockerfile004       c4b27b86c0b8        5 minutes ago       228MB
sshd                              dockerfile003       74213a63a585        18 minutes ago      228MB
sshd                              dockerfile002       4981e935d5df        38 minutes ago      228MB
sshd                              dockerfile001       1a767dd4e3e2        About an hour ago   228MB
root@docker:/opt/dockerfile/ubuntu_sshd#

运行  将容器的22号映射到host的12224端口
docker run -p 12224:22 -d sshd:dockerfile004  /run.sh
 
root@docker:/opt/dockerfile/ubuntu_sshd# docker ps |grep dockerfile
c68fbed21f8b        sshd:dockerfile004                   "/run.sh"                3 minutes ago       Up 3 minutes        0.0.0.0:12224->22/tcp   optimistic_curie

测试是否能ssh   ssh -p 端口 用户@主机IP
ssh -p 12224 jeff001@192.168.10.67

提示如下即成功
Connecting to 192.168.10.67:12224…
Connection established.
To escape to local shell, press ‘Ctrl+Alt+]’.

WARNING! The remote SSH server rejected X11 forwarding request.
Welcome to Ubuntu 18.04.1 LTS (GNU/Linux 4.18.0-16-generic x86_64)

测试成功

推送镜像到 仓库
#标记成仓库相关的  docker tag SOURCE_IMAGE[:TAG] reg.ccie.wang/test/IMAGE[:TAG]
docker tag sshd:dockerfile004 reg.ccie.wang/test/ubuntu:sshd001
#推镜像到仓库    docker push reg.ccie.wang/test/IMAGE[:TAG]
docker push reg.ccie.wang/test/ubuntu:sshd001
The push refers to repository [reg.ccie.wang/test/ubuntu]
e8606e3e9f2b: Preparing

denied: requested access to the resource is denied   #说明没有登陆或没有权限。

先登陆到仓库
root@docker:/opt/dockerfile/ubuntu_sshd# sh +x /root/login.sh
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

再次推送到仓库
root@docker:/opt/dockerfile/ubuntu_sshd# docker push reg.ccie.wang/test/ubuntu:sshd001
The push refers to repository [reg.ccie.wang/test/ubuntu]
e8606e3e9f2b: Pushed
638b4a9375b3: Pushed
38f2771441e5: Pushed
d41a6372b93f: Pushed
992b79b09792: Pushed
d76a7595b8f4: Pushed
03e5cb7ca68f: Pushed
23f382505b40: Pushed
4b7d93055d87: Pushed
663e8522d78b: Pushed
283fb404ea94: Pushed
bebe7ce6215a: Pushed
sshd001: digest: sha256:5c60c99a4d59a40dafe5dfe26a37679804e1d2d09ec90bbe975e5abd2a5c0361 size: 3024

以上为推送成功

拉取镜像测试
1.删除现有镜像

docker rmi reg.ccie.wang/test/ubuntu:sshd001
Untagged: reg.ccie.wang/test/ubuntu:sshd001
Untagged: reg.ccie.wang/test/ubuntu@sha256:5c60c99a4d59a40dafe5dfe26a37679804e1d2d09ec90bbe975e5abd2a5c0361
以上为成功

2.从仓库拉取镜像  docker pull reg.ccie.wang/test/ubuntu:sshd001

root@docker:/opt/dockerfile/ubuntu_sshd# docker pull reg.ccie.wang/test/ubuntu:sshd001
sshd001: Pulling from test/ubuntu
Digest: sha256:5c60c99a4d59a40dafe5dfe26a37679804e1d2d09ec90bbe975e5abd2a5c0361
Status: Downloaded newer image for reg.ccie.wang/test/ubuntu:sshd001

验证是否成功
root@docker:/opt/dockerfile/ubuntu_sshd# docker images
REPOSITORY                        TAG                 IMAGE ID            CREATED             SIZE
reg.ccie.wang/test/ubuntu         sshd001             c4b27b86c0b8        14 hours ago        228MB

使用该镜像生成容器
#
docker run -p 12225:22 -d reg.ccie.wang/test/ubuntu:sshd001  /run.sh
7ae5fd70960dc933c1f98106a360da3f14c5e93d7cd17bce0f896f69f041fc67
#验证
root@docker:/opt/dockerfile/ubuntu_sshd# docker ps |grep sshd001
7ae5fd70960d        reg.ccie.wang/test/ubuntu:sshd001    "/run.sh"                10 seconds ago      Up 7 seconds        0.0.0.0:12225->22/tcp   happy_albattani
以上信息为成功

也可把当前容器打包一个镜像文件docker commit  容器ID 新TAG

# docker commit 7ae5fd70960d ubuntu:sshd002
sha256:a9c6f22446466cbf920d5e22f95e0d378558d3852a3bc1592a701d78777ba222

验证
root@docker:/opt/dockerfile/ubuntu_sshd# docker images
REPOSITORY                        TAG                 IMAGE ID            CREATED             SIZE
ubuntu                            sshd002             a9c6f2244646        5 seconds ago       228MB

Linux通过Shell脚本命令修改密码的两种方式

No Comments Linux

使用脚本修改密码

很多时候我们可能需要远程执行服务器上的脚本来修改账号密码,此时就没有办法进行交互了。

此时可以使用如下两种方式修改密码:

方式1: 试了不行

echo "password" | passwd testuser --stdin > /dev/null 2>&1

方式2: 这个可以

echo testuser:password|chpasswd

注:

1.密码字符串的双引号,可有可无,见上面 方式1 和 方式2 的例子

2.如果密码中包含 $ 字符,需要使用反斜线进行转义,如:

 

echo testuser:password\$|chpasswd

总结

以上所述是小编给大家介绍的Linux通过Shell脚本命令修改密码的两种方式,希望对大家有所帮助,如果大家有任何疑问请给我留言,小编会及时回复大家的。在此也非常感谢大家对脚本之家网站的支持!

https://www.jb51.net/article/140708.htm

一看必会系列:useradd -p新建用户登录不了系统解决方案

No Comments Linux

 

 

1。先建用户

useradd jeff003

2 再设置密码

passwd jeff003

111111

3.cat /etc/shadow  查看加密后的密码

 

jeff003:$6$4MbjRKVY$UNqUhN485ZMgOL0IuLBh09ok1ERlksSPnmD.2SwxLeOWHxQtx.3u6l8toNJWFnWFO1a1QdhQSJBKASp2EZ7/t/:17968:0:99999:7:::

4.使用命令生成用户 及密码 useradd -p '加密密码' 用户名

useradd  -p '$6$4MbjRKVY$UNqUhN485ZMgOL0IuLBh09ok1ERlksSPnmD.2SwxLeOWHxQtx.3u6l8toNJWFnWFO1a1QdhQSJBKASp2EZ7/t/' jeff004

一看必会系列:docker-compose简单部署nginx

No Comments Docker

task
service
stack

安装
  719  sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
  720  sudo chmod +x /usr/local/bin/docker-compose
  721  docker-compose -h
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
root@docker:~# docker-compose --version
docker-compose version 1.23.2, build 1110ad01
root@docker:~#

目录分布如下
└── dweb
    ├── docker-compose.yml
    ├── nginx01
    │   └── index.html
    └── nginx02
        ├── 1
        └── index.html

配置文件如下docker-compose.yml
root@docker:/opt/docker-compose/dweb# cat docker-compose.yml
nginx01:
    image: reg.ccie.wang/library/nginx:1.15.9
    volumes:
         # Mount local ./nginx01 onto the container's web root.
         # FIX: list items must start with an ASCII hyphen "-"; the blog
         # rendering turned them into en-dashes, which is invalid YAML.
         - ./nginx01:/usr/share/nginx/html
    container_name: nginx01
    ports:
                # host_port:container_port
                - "80:80"
nginx02:
    image: reg.ccie.wang/library/nginx:1.15.9
    volumes:
          - ./nginx02:/usr/share/nginx/html
    container_name: nginx02
    ports:
                - "81:80"

在docker-compose.yml目录运行  -d 是后台运行
docker-compose up -d
docker-compose down -v  #停止。并删除所有卷
docker-compose restart #重启服务

root@docker:/opt/docker-compose/dweb# docker-compose up -d
Creating nginx01 … done
Creating nginx02 … done

               
验证端口
root@docker:/opt/docker-compose/dweb# netstat -ntlp |grep docker
tcp6       0      0 :::80                   :::*                    LISTEN      24251/docker-proxy 
tcp6       0      0 :::81                   :::*                    LISTEN      24237/docker-proxy 
root@docker:/opt/docker-compose/dweb#

验证docker进程
root@docker:/opt/docker-compose/dweb# docker ps
CONTAINER ID        IMAGE                                COMMAND                  CREATED             STATUS              PORTS                NAMES
5f9a63957028        reg.ccie.wang/library/nginx:1.15.9   "nginx -g ‘daemon of…"   34 seconds ago      Up 31 seconds       0.0.0.0:80->80/tcp   nginx01
cfb46b4ea3d3        reg.ccie.wang/library/nginx:1.15.9   "nginx -g ‘daemon of…"   34 seconds ago      Up 31 seconds       0.0.0.0:81->80/tcp   nginx02

生成index.html文件验证web可用性
root@docker:/opt/docker-compose/dweb# cd nginx01
root@docker:/opt/docker-compose/dweb/nginx01# vim index.html
root@docker:/opt/docker-compose/dweb/nginx01# cd ..
root@docker:/opt/docker-compose/dweb# vim nginx02/index.html
root@docker:/opt/docker-compose/dweb#

结果正常
root@docker:/opt/docker-compose/dweb# curl localhost:81
002
root@docker:/opt/docker-compose/dweb# curl localhost:80
001
root@docker:/opt/docker-compose/dweb#

一看必会系列:etcd 单机集群部署

No Comments Linux

etcd 单机集群部署

下载
https://github.com/etcd-io/etcd/releases/tag/v3.3.12

ETCD_VER=v3.3.12

# choose either URL
GOOGLE_URL=https://storage.googleapis.com/etcd
GITHUB_URL=https://github.com/etcd-io/etcd/releases/download
DOWNLOAD_URL=${GOOGLE_URL}

rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
rm -rf /tmp/etcd-download-test && mkdir -p /tmp/etcd-download-test

curl -L ${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download-test --strip-components=1
rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz

/tmp/etcd-download-test/etcd --version
ETCDCTL_API=3 /tmp/etcd-download-test/etcdctl version

也可以下载后将
etcd*  复制到/usr/local/bin  就可以直接使用etcd  etcdctl命令了

 

创建
root@docker:~# tree /opt/etcd -L 2
/opt/etcd
├── conf
│   ├── node1.yml   配置文件
│   ├── node2.yml
│   └── node3.yml
└── data
    ├── node1       节点数据目录
    ├── node2
    ├── node3
    └── node4

6 directories, 3 files
root@docker:~#

各节点配置文件
root@docker:~# cat /opt/etcd/conf/*

name: node1
data-dir: /opt/etcd/data/node1
# FIX: the curly quotes from the blog rendering are invalid YAML; use ASCII '.
listen-client-urls: 'http://0.0.0.0:9002'
advertise-client-urls: 'http://0.0.0.0:9002'
listen-peer-urls: 'http://0.0.0.0:9001'
initial-advertise-peer-urls: 'http://0.0.0.0:9001'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
initial-cluster-token: etcd-cluster-1
initial-cluster-state: new

name: node2
data-dir: /opt/etcd/data/node2
# FIX: replaced mangled curly quotes with ASCII single quotes (valid YAML).
listen-client-urls: 'http://0.0.0.0:9004'
advertise-client-urls: 'http://0.0.0.0:9004'
listen-peer-urls: 'http://0.0.0.0:9003'
initial-advertise-peer-urls: 'http://0.0.0.0:9003'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
initial-cluster-token: etcd-cluster-1
initial-cluster-state: new

name: node3
data-dir: /opt/etcd/data/node3
# FIX: replaced mangled curly quotes with ASCII single quotes (valid YAML).
listen-client-urls: 'http://0.0.0.0:9006'
advertise-client-urls: 'http://0.0.0.0:9006'
listen-peer-urls: 'http://0.0.0.0:9005'
initial-advertise-peer-urls: 'http://0.0.0.0:9005'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
initial-cluster-token: etcd-cluster-1
initial-cluster-state: new
root@docker:~#

启动脚本
nohup etcd --config-file=/opt/etcd/conf/node1.yml &
nohup etcd --config-file=/opt/etcd/conf/node2.yml &
nohup etcd --config-file=/opt/etcd/conf/node3.yml &
root@docker:~#

 

参数说明:
● --data-dir 指定节点的数据存储目录,若不指定,则默认是当前目录。这些数据包括节点ID,集群ID,集群初始化配置,Snapshot文件,若未指定--wal-dir,还会存储WAL文件
● --wal-dir 指定节点的wal文件存储目录,若指定了该参数,wal文件会和其他数据文件分开存储
● --name 节点名称
● --initial-advertise-peer-urls 告知集群其他节点的URL,tcp2380端口用于集群通信
● --listen-peer-urls 监听URL,用于与其他节点通讯
● --advertise-client-urls 告知客户端的URL, 也就是服务的URL,tcp2379端口用于监听客户端请求
● --initial-cluster-token 集群的ID
● --initial-cluster 集群中所有节点
● --initial-cluster-state 集群状态,new为新创建集群,existing为已存在的集群

在etcd1、etcd2上分别做相似操作,只需将脚本中--advertise-client-urls 和 --initial-advertise-peer-urls 参数修改一下即可。

注意:上面的初始化只是在集群初始化时运行一次,之后节点的服务有重启,必须要去掉initial参数,否则报错。

验证
root@docker:~# etcdctl --endpoints http://127.0.0.1:9002,http://127.0.0.1:9004,http://127.0.0.1:9006 member list

b5b6e1baef01d74: name=node2 peerURLs=http://0.0.0.0:9003 clientURLs=http://0.0.0.0:9004 isLeader=false
7f630db3033b1564: name=node1 peerURLs=http://0.0.0.0:9001 clientURLs=http://0.0.0.0:9002 isLeader=false
fd1de2479ca19cfa: name=node3 peerURLs=http://0.0.0.0:9005 clientURLs=http://0.0.0.0:9006 isLeader=true
root@docker:~#

root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 cluster-health
member b5b6e1baef01d74 is healthy: got healthy result from http://0.0.0.0:9004
member 7f630db3033b1564 is healthy: got healthy result from http://0.0.0.0:9002
member fd1de2479ca19cfa is healthy: got healthy result from http://0.0.0.0:9006
cluster is healthy
root@docker:~#

修改节点信息

root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 member update fd1de2479ca19cfa http://192.168.10.67:9006
Updated member with ID fd1de2479ca19cfa in cluster
root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 cluster-health
member b5b6e1baef01d74 is healthy: got healthy result from http://0.0.0.0:9004
member 7f630db3033b1564 is healthy: got healthy result from http://0.0.0.0:9002
member fd1de2479ca19cfa is healthy: got healthy result from http://0.0.0.0:9006

如果你想更新一个节点的IP(peerURLS),首先你需要知道那个节点的ID,就是最前面的一段b5b6e1baef01d74
root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 member list
b5b6e1baef01d74: name=node2 peerURLs=http://0.0.0.0:9003 clientURLs=http://0.0.0.0:9004 isLeader=false
7f630db3033b1564: name=node1 peerURLs=http://0.0.0.0:9001 clientURLs=http://0.0.0.0:9002 isLeader=false
fd1de2479ca19cfa: name=node3 peerURLs=http://192.168.10.67:9006 clientURLs=http://0.0.0.0:9006 isLeader=true
root@docker:~#

删除一个节点
root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 member remove fd1de2479ca19cfa
Removed member fd1de2479ca19cfa from cluster

验证
root@docker:~# etcdctl --endpoints http://127.0.0.1:9002 member list
b5b6e1baef01d74: name=node2 peerURLs=http://0.0.0.0:9003 clientURLs=http://0.0.0.0:9004 isLeader=true
7f630db3033b1564: name=node1 peerURLs=http://0.0.0.0:9001 clientURLs=http://0.0.0.0:9002 isLeader=false

增加一个节点
root@docker:~# etcdctl --endpoints http://127.0.0.1:9002 member add node3 http://0.0.0.0:9005
Added member named node3 with ID 3979a731e0408e32 to cluster

提示信息
ETCD_NAME="node3"
ETCD_INITIAL_CLUSTER="node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005,node1=http://0.0.0.0:9001"
ETCD_INITIAL_CLUSTER_STATE="existing"
root@docker:~#

验证是否正常
root@docker:/opt/etcd/conf# etcdctl --endpoints http://127.0.0.1:9002 member list
b5b6e1baef01d74: name=node2 peerURLs=http://0.0.0.0:9003 clientURLs=http://0.0.0.0:9004 isLeader=true
3979a731e0408e32[unstarted]: peerURLs=http://0.0.0.0:9005        -----状态不对
7f630db3033b1564: name=node1 peerURLs=http://0.0.0.0:9001 clientURLs=http://0.0.0.0:9002 isLeader=false
root@docker:/opt/etcd/conf#

解决
清空目标节点etcd3的data-dir
节点删除后,集群中的成员信息会更新,新节点是作为一个全新的节点加入集群,如果data-dir有数据,
etcd启动时会读取己经存在的数据,仍然用老的memberID会造成无法加入集群,所以一定要清空新节点的data-dir。

root@docker:/opt/etcd/conf# rm -rf /opt/etcd/data/node3/

这里的initial标记一定要指定为existing,如果为new,则会自动生成一个新的memberID,
这和前面添加节点时生成的ID不一致,故日志中会报节点ID不匹配的错。

正确配置如下
root@docker:/opt/etcd/conf# vim node3.yml
name: node3
data-dir: /opt/etcd/data/node3
# FIX: replaced mangled curly quotes with ASCII single quotes (valid YAML).
listen-client-urls: 'http://0.0.0.0:9006'
advertise-client-urls: 'http://0.0.0.0:9006'
listen-peer-urls: 'http://0.0.0.0:9005'
initial-advertise-peer-urls: 'http://0.0.0.0:9005'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
initial-cluster-token: etcd-cluster-1
initial-cluster-state: existing   # changed from 'new' to 'existing' when rejoining the cluster
修改--advertise-client-urls 和 --initial-advertise-peer-urls 参数修改为etcd3的,--initial-cluster-state改为existing

 

启动node3
nohup etcd --config-file=/opt/etcd/conf/node3.yml &

ps -ef |grep etcd
root     12747     1  1 12:39 pts/0    00:00:05 etcd –config-file=/opt/etcd/conf/node1.yml
root     12748     1  1 12:39 pts/0    00:00:06 etcd –config-file=/opt/etcd/conf/node2.yml
root     12966 31275  0 12:44 pts/0    00:00:01 etcd –config-file=/opt/etcd/conf/node3.yml

验证,结果正确
root@docker:~# etcdctl --endpoints http://127.0.0.1:9004 get jdccie
http://www.jdccie.com
root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 get jdccie
http://www.jdccie.com
root@docker:~#

root@docker:~# etcdctl --endpoints http://127.0.0.1:9004 set ssl sslvpn.ccie.wang
sslvpn.ccie.wang
root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 get ssl
sslvpn.ccie.wang
root@docker:~#

节点扩容

1 加节点
etcdctl --endpoints http://127.0.0.1:9002 member add node4 http://0.0.0.0:9007
Added member named node4 with ID 63bd58b500460e51 to cluster

ETCD_NAME="node4"
ETCD_INITIAL_CLUSTER="node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005,node4=http://0.0.0.0:9007,node1=http://0.0.0.0:9001"
ETCD_INITIAL_CLUSTER_STATE="existing"

2.生成node4配置
root@docker:~# cat /opt/etcd/conf/node4.yml
name: node4
data-dir: /opt/etcd/data/node4
# FIX: replaced mangled curly quotes with ASCII single quotes (valid YAML).
listen-client-urls: 'http://0.0.0.0:9008'
advertise-client-urls: 'http://0.0.0.0:9008'
listen-peer-urls: 'http://0.0.0.0:9007'
initial-advertise-peer-urls: 'http://0.0.0.0:9007'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005,node4=http://0.0.0.0:9007
initial-cluster-token: etcd-cluster-1
initial-cluster-state: existing
root@docker:~#

3.启动node4

nohup etcd --config-file=/opt/etcd/conf/node4.yml > node4.log &
 
验证进程
ps -ef |grep etcd
root     12747     1  0 12:39 pts/0    00:00:46 etcd –config-file=/opt/etcd/conf/node1.yml
root     12748     1  0 12:39 pts/0    00:00:58 etcd –config-file=/opt/etcd/conf/node2.yml
root     12966 31275  0 12:44 pts/0    00:00:42 etcd –config-file=/opt/etcd/conf/node3.yml
root     18667 31275  0 14:15 pts/0    00:00:00 etcd –config-file=/opt/etcd/conf/node4.yml

验证member
root@docker:~# etcdctl --endpoints http://127.0.0.1:9002 member list
b5b6e1baef01d74: name=node2 peerURLs=http://0.0.0.0:9003 clientURLs=http://0.0.0.0:9004 isLeader=true
3979a731e0408e32: name=node3 peerURLs=http://0.0.0.0:9005 clientURLs=http://0.0.0.0:9006 isLeader=false
63bd58b500460e51: name=node4 peerURLs=http://0.0.0.0:9007 clientURLs=http://0.0.0.0:9008 isLeader=false
7f630db3033b1564: name=node1 peerURLs=http://0.0.0.0:9001 clientURLs=http://0.0.0.0:9002 isLeader=false
root@docker:~#

验证端口
root@docker:~# netstat -ntlp |grep 900
tcp6       0      0 :::9001                 :::*                    LISTEN      12747/etcd         
tcp6       0      0 :::9002                 :::*                    LISTEN      12747/etcd         
tcp6       0      0 :::9003                 :::*                    LISTEN      12748/etcd         
tcp6       0      0 :::9004                 :::*                    LISTEN      12748/etcd         
tcp6       0      0 :::9005                 :::*                    LISTEN      12966/etcd         
tcp6       0      0 :::9006                 :::*                    LISTEN      12966/etcd         
tcp6       0      0 :::9007                 :::*                    LISTEN      18667/etcd         
tcp6       0      0 :::9008                 :::*                    LISTEN      18667/etcd         
root@docker:~#

 

验证数据  9008为node4
root@docker:~# etcdctl --endpoints http://127.0.0.1:9008 get jdccie
http://www.jdccie.com
root@docker:~#

 

 

数据一致性验证
root@docker:~# etcdctl --endpoints http://127.0.0.1:9002 set jdccie http://www.jdccie.com
http://www.jdccie.com

root@docker:~# etcdctl --endpoints http://127.0.0.1:9002 get jdccie
http://www.jdccie.com
root@docker:~# etcdctl --endpoints http://127.0.0.1:9004 get jdccie
http://www.jdccie.com
root@docker:~# etcdctl --endpoints http://127.0.0.1:9006 get jdccie
http://www.jdccie.com
root@docker:~#

那么问题来了
新加的节点。原来节点配置不变的情况下。重启node1 集群是否正常

验证一下
1063  ps -ef |grep etcd
1064  kill -9 12747
1065  sh +x etcd-cluster1.sh
 
脚本etcd-cluster1.sh
#!/bin/bash
# Start all four etcd nodes in the background, appending output to per-node
# log files. FIX: '--config-file' must use two ASCII hyphens — the blog
# rendering turned them into a single en-dash, which etcd rejects.
nohup etcd --config-file=/opt/etcd/conf/node1.yml >> node1.log &
nohup etcd --config-file=/opt/etcd/conf/node2.yml >> node2.log &
nohup etcd --config-file=/opt/etcd/conf/node3.yml >> node3.log &
nohup etcd --config-file=/opt/etcd/conf/node4.yml >> node4.log &

集群仍存在。并且选择了 node2为主节点
root@docker:~# etcdctl --endpoints http://127.0.0.1:9008 member list
b5b6e1baef01d74: name=node2 peerURLs=http://0.0.0.0:9003 clientURLs=http://0.0.0.0:9004 isLeader=true
3979a731e0408e32: name=node3 peerURLs=http://0.0.0.0:9005 clientURLs=http://0.0.0.0:9006 isLeader=false
63bd58b500460e51: name=node4 peerURLs=http://0.0.0.0:9007 clientURLs=http://0.0.0.0:9008 isLeader=false
7f630db3033b1564: name=node1 peerURLs=http://0.0.0.0:9001 clientURLs=http://0.0.0.0:9002 isLeader=false
root@docker:~#

附节点配置
cat /opt/etcd/conf/*

name: node1
data-dir: /opt/etcd/data/node1
# FIX: replaced mangled curly quotes with ASCII single quotes (valid YAML).
listen-client-urls: 'http://0.0.0.0:9002'
advertise-client-urls: 'http://0.0.0.0:9002'
listen-peer-urls: 'http://0.0.0.0:9001'
# initial-* settings are only used on first bootstrap; commented out after
# the cluster exists so restarts rejoin instead of re-initializing.
#initial-advertise-peer-urls: 'http://0.0.0.0:9001'
#initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
#initial-cluster-token: etcd-cluster-1
#initial-cluster-state: new

name: node2
data-dir: /opt/etcd/data/node2
listen-client-urls: 'http://0.0.0.0:9004'
advertise-client-urls: 'http://0.0.0.0:9004'
listen-peer-urls: 'http://0.0.0.0:9003'
#initial-advertise-peer-urls: 'http://0.0.0.0:9003'
#initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
#initial-cluster-token: etcd-cluster-1
#initial-cluster-state: new

name: node3
data-dir: /opt/etcd/data/node3
listen-client-urls: 'http://0.0.0.0:9006'
advertise-client-urls: 'http://0.0.0.0:9006'
listen-peer-urls: 'http://0.0.0.0:9005'
initial-advertise-peer-urls: 'http://0.0.0.0:9005'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005
initial-cluster-token: etcd-cluster-1
initial-cluster-state: existing

name: node4
data-dir: /opt/etcd/data/node4
listen-client-urls: 'http://0.0.0.0:9008'
advertise-client-urls: 'http://0.0.0.0:9008'
listen-peer-urls: 'http://0.0.0.0:9007'
initial-advertise-peer-urls: 'http://0.0.0.0:9007'
initial-cluster: node1=http://0.0.0.0:9001,node2=http://0.0.0.0:9003,node3=http://0.0.0.0:9005,node4=http://0.0.0.0:9007
initial-cluster-token: etcd-cluster-1
initial-cluster-state: existing