
make oracle on vagrant box

https://geraldonit.com/2018/06/11/creating-an-oracle-database-vagrant-box/

Based on that guide, create two VMs using the oraclelinux/7 box: 192.168.56.10 and 192.168.56.11.


# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# Box metadata location and box name
BOX_URL = "https://oracle.github.io/vagrant-projects/boxes"
BOX_NAME = "oraclelinux/7"

# UI object for printing information
ui = Vagrant::UI::Prefixed.new(Vagrant::UI::Colored.new, "vagrant")

# Define constants
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Use vagrant-env plugin if available
if Vagrant.has_plugin?("vagrant-env")
config.env.load(".env.local", ".env") # enable the plugin
end

# VM name
VM_NAME = default_s('VM_NAME', 'oracle11g-vagrant-whx')

# Memory for the VM (in MB, 2048 MB = 2 GB)
VM_MEMORY = default_i('VM_MEMORY', 4096)

# VM time zone
# If not specified, will be set to match host time zone (if possible)
VM_SYSTEM_TIMEZONE = default_s('VM_SYSTEM_TIMEZONE', host_tz)

# Listener port
VM_LISTENER_PORT = default_i('VM_LISTENER_PORT', 1521)
VM_LISTENER_HOST_PORT = default_i('VM_LISTENER_HOST_PORT', 1521)

# Oracle Database password for SYS and SYSTEM accounts
# If left blank, the password will be generated automatically
VM_ORACLE_PWD = default_s('VM_ORACLE_PWD', '')
end

# Convenience methods
def default_s(key, default)
ENV[key] && ! ENV[key].empty? ? ENV[key] : default
end

def default_i(key, default)
default_s(key, default).to_i
end

def host_tz
# get host time zone for setting VM time zone
# if host time zone isn't an integer hour offset from GMT, fall back to UTC
offset_sec = Time.now.gmt_offset
if (offset_sec % (60 * 60)) == 0
offset_hr = ((offset_sec / 60) / 60)
timezone_suffix = offset_hr >= 0 ? "-#{offset_hr.to_s}" : "+#{(-offset_hr).to_s}"
'Etc/GMT' + timezone_suffix
else
'UTC'
end
end

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = BOX_NAME
config.vm.box_url = "#{BOX_URL}/#{BOX_NAME}.json"
config.vm.define VM_NAME

# Provider-specific configuration
config.vm.provider "virtualbox" do |v|
v.memory = VM_MEMORY
v.name = VM_NAME
end
config.vm.provider :libvirt do |v|
v.memory = VM_MEMORY
end

config.vm.network :private_network, ip: "192.168.56.10"

# add proxy configuration from host env - optional
if Vagrant.has_plugin?("vagrant-proxyconf")
ui.info "Getting Proxy Configuration from Host..."
has_proxy = false
["http_proxy", "HTTP_PROXY"].each do |proxy_var|
if proxy = ENV[proxy_var]
ui.info "HTTP proxy: " + proxy
config.proxy.http = proxy
has_proxy = true
break
end
end

["https_proxy", "HTTPS_PROXY"].each do |proxy_var|
if proxy = ENV[proxy_var]
ui.info "HTTPS proxy: " + proxy
config.proxy.https = proxy
has_proxy = true
break
end
end

if has_proxy
# Only consider no_proxy if we have proxies defined.
no_proxy = ""
["no_proxy", "NO_PROXY"].each do |proxy_var|
if ENV[proxy_var]
no_proxy = ENV[proxy_var]
ui.info "No proxy: " + no_proxy
no_proxy += ","
break
end
end
config.proxy.no_proxy = no_proxy + "localhost,127.0.0.1"
end
else
["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"].each do |proxy_var|
if ENV[proxy_var]
ui.warn 'To enable proxies in your VM, install the vagrant-proxyconf plugin'
break
end
end
end

# VM hostname
config.vm.hostname = VM_NAME

# Oracle port forwarding
config.vm.network "forwarded_port", guest: VM_LISTENER_PORT, host: VM_LISTENER_HOST_PORT

# Provision everything on the first run
config.vm.provision "shell", path: "scripts/install.sh", env:
{
"SYSTEM_TIMEZONE" => VM_SYSTEM_TIMEZONE,
"LISTENER_PORT" => VM_LISTENER_PORT,
"ORACLE_PWD" => VM_ORACLE_PWD
}

end

Install dependencies:

yum install binutils  compat-libstdc++-33  compat-libstdc++-33.i686  gcc   gcc-c++  glibc  glibc.i686  glibc-devel  glibc-devel.i686  ksh  libgcc   libgcc.i686  libstdc++ libstdc++.i686 libstdc++-devel  libstdc++-devel.i686  libaio  libaio.i686  libaio-devel  libaio-devel.i686  libXext libXext.i686 libXtst libXtst.i686 libX11 libX11.i686  libXau  libXau.i686 libxcb  libxcb.i686   libXi  libXi.i686  make  sysstat  unixODBC unixODBC-devel  zlib-devel  elfutils-libelf-devel -y

yum install -y xorg-x11-xauth xorg-x11-font-utils xorg-x11-fonts-* xorg-x11-fonts-Type1


Edit /etc/ssh/sshd_config:

X11Forwarding yes
X11UseLocalhost no
X11DisplayOffset 10
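
For the X11 settings to take effect, sshd usually needs a restart (assuming systemd on Oracle Linux 7):

sudo systemctl restart sshd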

Exit the terminal and reconnect.

Create a script (saved as whx.sh) that opens an X-forwarded SSH session into the box:

#!/bin/sh
PORT=$(vagrant ssh-config | grep Port | grep -o '[0-9]\+')

IDFILE=$(vagrant ssh-config |grep IdentityFile|awk '{print $2}')
ssh -Y \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
-o "XAuthLocation=/opt/X11/bin/xauth" \
-i $IDFILE \
vagrant@localhost \
-p $PORT \
"$@"




Start a terminal in XQuartz and run:

./whx.sh
echo $DISPLAY

cd /data/database
./runInstaller

As root, run:

/home/vagrant/app/oraInventory/orainstRoot.sh
/home/vagrant/app/vagrant/product/11.2.0/dbhome_1/root.sh

Master settings:

SID: whx

Password: zaq1XSW2

export ORACLE_SID=whx
export ORACLE_BASE=/home/vagrant/app/vagrant
export ORACLE_HOME=/home/vagrant/app/vagrant/product/11.2.0/dbhome_1

PATH=$PATH:$ORACLE_HOME/bin

Slave settings:

SID: orcl

Password: zaq1XSW2

export ORACLE_SID=orcl
export ORACLE_BASE=/home/vagrant/app/vagrant
export ORACLE_HOME=/home/vagrant/app/vagrant/product/11.2.0/dbhome_1

PATH=$PATH:$ORACLE_HOME/bin

Start Oracle:

dbstart  $ORACLE_HOME
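
Note that dbstart only starts instances whose restart flag in /etc/oratab is Y; assuming the paths above, the oratab entry would look like:

whx:/home/vagrant/app/vagrant/product/11.2.0/dbhome_1:Y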

Now let's try master-slave replication with Oracle Streams, following:

https://www.cnblogs.com/hooly/p/8178570.html

On both the master and the slave, run:

sqlplus / as sysdba
alter system set aq_tm_processes=2 scope=both;    -- enable time monitoring of queue messages
alter system set global_names=true scope=both;    -- require global names to be true
alter system set undo_retention=3600 scope=both;  -- undo retention in seconds (default is 900)
alter system set streams_pool_size=25M scope=spfile; -- not needed if the SGA is auto-tuned

Set the master database (whx) to archivelog mode (as sysdba, in sqlplus).

First check whether archiving is already enabled; if it is, skip the archive setup below.

SQL> archive log list;

Database log mode No Archive Mode
Automatic archival Disabled
Archive destination USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence 6
Current log sequence 8

Enable archivelog mode:

shutdown immediate;
startup mount;
alter database archivelog;
alter system set LOG_ARCHIVE_DEST_1='LOCATION=/home/vagrant/app/archive'; -- set the archive directory (after running this, check that the directory exists; if not, create it manually and run the statement again)
alter database open;
alter system switch logfile; -- check that an archive (arc) file appears in that directory (if it says the database is not open, run "alter database open;" first)

Create the Streams administrator user on both the master and the slave:

sqlplus / as sysdba;
create tablespace tbs_stream datafile '/home/vagrant/app/vagrant/oradata/whx/tbs_stream01.dbf' size 2000m autoextend on maxsize unlimited segment space management auto; -- dedicated Streams tablespace on the master
execute dbms_logmnr_d.set_tablespace('tbs_stream'); -- move the LogMiner data dictionary out of the SYSTEM tablespace so it cannot fill it up
create user strmadmin identified by strmadmin default tablespace tbs_stream temporary tablespace temp; -- create the user
grant dba to strmadmin; -- simply grant DBA

And on the slave:

sqlplus / as sysdba;
create tablespace tbs_stream datafile '/home/vagrant/app/vagrant/oradata/orcl/tbs_stream01.dbf' size 2000m autoextend on maxsize unlimited segment space management auto; -- dedicated Streams tablespace on the slave
execute dbms_logmnr_d.set_tablespace('tbs_stream'); -- move the LogMiner data dictionary out of the SYSTEM tablespace so it cannot fill it up
create user strmadmin identified by strmadmin default tablespace tbs_stream temporary tablespace temp; -- create the user
grant dba to strmadmin; -- simply grant DBA


On the master, create a database link to the slave:

create public database link orcl connect to strmadmin IDENTIFIED BY strmadmin
using '(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.56.11)(PORT = 1521))
)
(CONNECT_DATA =
(SID= orcl)
)
)';

On the slave, create a database link to the master:

create public database link whx connect to strmadmin IDENTIFIED BY strmadmin
using '(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.56.10)(PORT = 1521))
)
(CONNECT_DATA =
(SID = whx)
)
)';

Verify the link:

select sysdate from dual@whx;

Create the Streams queue on the master (this can be run from PL/SQL Developer's SQL window, logged in as strmadmin):

connect strmadmin/strmadmin  
begin
dbms_streams_adm.set_up_queue(queue_table => 'whx_queue_table',queue_name => 'whx_queue');
end;
/

Expected output: PL/SQL procedure successfully completed.

Create the Streams queue on the slave:

connect strmadmin/strmadmin  -- log in to the slave database as strmadmin
begin
dbms_streams_adm.set_up_queue(queue_table => 'orcl_queue_table',queue_name => 'orcl_queue');
end;
/

Expected output: PL/SQL procedure successfully completed.

Create the capture process on the master:

connect strmadmin/strmadmin

begin
dbms_streams_adm.add_schema_rules(
schema_name => 'strmadmin',
streams_type => 'capture',
streams_name => 'capture_whx',
queue_name => 'strmadmin.whx_queue',
include_dml => true,
include_ddl => true,
include_tagged_lcr => false,
source_database => null,
inclusion_rule => true);
end;
/

Expected output: PL/SQL procedure successfully completed.

Instantiate the strmadmin schema on the slave via export/import (the two file paths must be identical):

exp strmadmin/strmadmin@whx file='/home/vagrant/crm.dmp' object_consistent=y rows=y
exp strmadmin/strmadmin@orcl file='/home/vagrant/crm.dmp' object_consistent=y rows=y

This failed with:

EXP-00056: ORACLE error 12154 encountered
ORA-12154: TNS:could not resolve the connect identifier specified
EXP-00000: Export terminated unsuccessfully

Edit /home/vagrant/app/vagrant/product/11.2.0/dbhome_1/network/admin/tnsnames.ora and add the WHX entry:


ORCL =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = oracle11g-vagrant-slave)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = orcl)
)
)

WHX =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.56.10)(PORT = 1521))
(CONNECT_DATA =
(SID = whx)
)
)

After that, the export works.

Import the dump into the slave database as strmadmin:

imp strmadmin/strmadmin@orcl file='/home/vagrant/crm.dmp' ignore=y commit=y streams_instantiation=y full=y


Import: Release 11.2.0.1.0 - Production on Tue Oct 31 23:08:12 2023

Copyright (c) 1982, 2009, Oracle and/or its affiliates. All rights reserved.


Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options

Export file created by EXPORT:V11.02.00 via conventional path
import done in US7ASCII character set and AL16UTF16 NCHAR character set
import server uses AL32UTF8 character set (possible charset conversion)
. importing STRMADMIN's objects into STRMADMIN
. . importing table "AQ$_WHX_QUEUE_TABLE_C" 0 rows imported
. . importing table "AQ$_WHX_QUEUE_TABLE_G" 0 rows imported
. . importing table "AQ$_WHX_QUEUE_TABLE_H"
Note: table contains ROWID column, values may be obsolete 0 rows imported
. . importing table "AQ$_WHX_QUEUE_TABLE_I"
Note: table contains ROWID column, values may be obsolete 0 rows imported
. . importing table "AQ$_WHX_QUEUE_TABLE_L" 0 rows imported
. . importing table "AQ$_WHX_QUEUE_TABLE_S" 1 rows imported
. . importing table "AQ$_WHX_QUEUE_TABLE_T" 0 rows imported
. . importing table "WHX_QUEUE_TABLE" 0 rows imported
Import terminated successfully without warnings.

Create the propagation process on the master:

connect strmadmin/strmadmin
begin
dbms_streams_adm.add_schema_propagation_rules(
schema_name => 'strmadmin',
streams_name => 'whx_to_orcl',
source_queue_name => 'strmadmin.whx_queue',
destination_queue_name => 'strmadmin.orcl_queue@orcl',
include_dml => true,
include_ddl => true,
include_tagged_lcr => false,
source_database => 'whx',
inclusion_rule => true);
end;
/

Note: this block may raise an error; if it does, ignore it and continue with the next steps.

Set the propagation latency to 0 so LCRs are propagated in real time (latency is in seconds):

begin
dbms_aqadm.alter_propagation_schedule(
queue_name => 'whx_queue',
destination => 'orcl',
latency => 0);
end;
/

Create the apply process on the slave:

connect strmadmin/strmadmin
begin
dbms_streams_adm.add_schema_rules(
schema_name => 'strmadmin',
streams_type => 'apply',
streams_name => 'apply_orcl',
queue_name => 'strmadmin.orcl_queue',
include_dml => true,
include_ddl => true,
include_tagged_lcr => false,
source_database => 'whx',
inclusion_rule => true);
end;
/

Start Streams

Start the apply process on the slave:

connect strmadmin/strmadmin
begin
dbms_apply_adm.start_apply(
apply_name => 'apply_orcl');
end;
/

Start the capture process on the master:

begin
dbms_capture_adm.start_capture(
capture_name => 'capture_whx');
end;
/

Now you can test replication: on the master (whx), insert and delete rows in a test table, create tables, and alter table structures, then verify that the changes show up on the slave.
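
A minimal check, assuming a hypothetical test table t_test in the strmadmin schema:

-- on the master (whx), as strmadmin
create table t_test (id number primary key, name varchar2(50));
insert into t_test values (1, 'hello');
commit;

-- shortly afterwards, on the slave (orcl), as strmadmin
select * from t_test;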

Oracle learning notes

Create an oracle OS user and log in as oracle.

Add to .bash_profile:

# Oracle Settings
TMP=/tmp; export TMP
TMPDIR=$TMP; export TMPDIR

ORACLE_HOSTNAME=wghl09; export ORACLE_HOSTNAME
ORACLE_UNQNAME=server; export ORACLE_UNQNAME
ORACLE_BASE=/data/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/app/product/11.2.0/db_1; export ORACLE_HOME
ORACLE_SID=hwdb; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
PATH=/usr/sbin:$PATH; export PATH
PATH=$ORACLE_HOME/bin:$PATH; export PATH

LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib; export LD_LIBRARY_PATH
CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib; export CLASSPATH

Install dependencies:


yum install binutils compat-libstdc++-33 compat-libstdc++-33.i686 gcc gcc-c++ glibc glibc.i686 glibc-devel glibc-devel.i686 ksh libgcc libgcc.i686 libstdc++ libstdc++.i686 libstdc++-devel libstdc++-devel.i686 libaio libaio.i686 libaio-devel libaio-devel.i686 libXext libXext.i686 libXtst libXtst.i686 libX11 libX11.i686 libXau libXau.i686 libxcb libxcb.i686 libXi libXi.i686 make sysstat unixODBC unixODBC-devel zlib-devel elfutils-libelf-devel -y

yum install -y xorg-x11-xauth xorg-x11-font-utils xorg-x11-fonts-* xorg-x11-fonts-Type1


Edit /etc/ssh/sshd_config:

X11Forwarding yes
X11UseLocalhost no
X11DisplayOffset 10

Start a terminal in XQuartz and run:

ssh -Y oracle@**** -o "XAuthLocation=/opt/X11/bin/xauth"
echo $DISPLAY
192.168.1.19:10.0
cd /data/database
./runInstaller

Set the password:

Install Oracle with docker-compose

Use the https://hub.docker.com/r/oracleinanutshell/oracle-xe-11g image.

Create docker-compose.yml:

version: '3'

services:
  oracle-db:
    image: oracleinanutshell/oracle-xe-11g:latest
    ports:
      - 1521:1521
      - 5500:5500
    environment:
      - ORACLE_ALLOW_REMOTE=true

Pull the image:

docker-compose pull

Start it:

docker-compose up -d

Copy /u01 out of the container to ./data:

docker cp dck_oracle-db_1:/u01 ./data

Then modify docker-compose.yml:

version: '3'

services:
  oracle-db:
    image: oracleinanutshell/oracle-xe-11g:latest
    ports:
      - 1521:1521
      - 5500:5500
    environment:
      - ORACLE_ALLOW_REMOTE=true
    volumes:
      - ./data:/u01

Now the data is persisted on the local disk.

docker-compose exec oracle-db bash

sqlplus

user:system

pwd: oracle

By default, password verification is disabled (passwords never expire).
Connect to the database with the following settings (note: with the compose file above, the host port is 1521 rather than 49161):

hostname: localhost
port: 49161
sid: xe
username: system
password: oracle
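
For example, a JDBC thin URL in SID form for these settings would look like (use whichever host port you actually mapped, 1521 with the compose file above):

jdbc:oracle:thin:@localhost:1521:xe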

Password for SYS & SYSTEM

oracle

Latest versions

https://container-registry.oracle.com/

Downloads of the various versions are available here: https://www.oracle.com/cn/database/technologies/oracle-database-software-downloads.html#db_free

To use the Docker images, you must first accept Oracle's terms at https://container-registry.oracle.com/ords/f?p=113:1:102342682778790:::1:P1_BUSINESS_AREA:3&cs=3T4nkcPYyj6ewF0qib0dU82LAmedE6CKr9q8rmOAgxLycqz9EVlXaoK41xRcvBalvRR4e6uVOdKMXmQIvBkPMdw before you can pull.

docker login container-registry.oracle.com

user: wanghongxing@gmail.com

pwd: *Y4r6?C@~c7jX!h

Tags

Download Mirror

Tag OS/Architecture Pull Command Last Updated
19.3.0.0 linux/amd64 docker pull container-registry.oracle.com/database/enterprise:19.3.0.0 5 weeks ago
latest linux/amd64 docker pull container-registry.oracle.com/database/enterprise:latest 5 weeks ago
21.3.0.0 linux/amd64 docker pull container-registry.oracle.com/database/enterprise:21.3.0.0 5 weeks ago
19.19.0.0 linux/arm64 docker pull container-registry.oracle.com/database/enterprise:19.19.0.0 2 months ago
12.2.0.1 linux/amd64 docker pull container-registry.oracle.com/database/enterprise:12.2.0.1 6.1 years ago
12.2.0.1-slim linux/amd64 docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim 6.1 years ago
12.1.0.2 linux/amd64 docker pull container-registry.oracle.com/database/enterprise:12.1.0.2 6.2 years ago
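
After accepting the terms and logging in, a container can be started along these lines (a sketch; ORACLE_PWD as the SYS/SYSTEM password variable is an assumption here, so check the image page on the registry for the exact parameters):

docker pull container-registry.oracle.com/database/enterprise:21.3.0.0
docker run -d --name oracle-ee -p 1521:1521 \
  -e ORACLE_PWD=MyPassword123 \
  container-registry.oracle.com/database/enterprise:21.3.0.0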

Vagrant CentOS 7 disk expansion

Add to the Vagrantfile:

config.disksize.size = "200GB"
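
The disksize option comes from the vagrant-disksize plugin; if it is not installed yet:

vagrant plugin install vagrant-disksize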

vagrant reload

Install the tools, then grow the partition and the filesystem:

vagrant ssh

yum install cloud-utils-growpart gdisk -y

# growpart /dev/sda 1
CHANGED: partition=1 start=2048 old: size=83884032 end=83886080 new: size=419428319 end=419430367

# xfs_growfs /
meta-data=/dev/sda1 isize=512 agcount=4, agsize=2621376 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=10485504, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=5119, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 10485504 to 52428539
# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 1.9G 0 1.9G 0% /dev
tmpfs 1.9G 0 1.9G 0% /dev/shm
tmpfs 1.9G 8.5M 1.9G 1% /run
tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/sda1 200G 3.6G 197G 2% /
tmpfs 379M 0 379M 0% /run/user/1000

docker build multi platform

Images built on an ARM MacBook are arm64 and won't run on the (x86) servers. To build an image the servers can run:

docker   build --platform linux/amd64 .

But that image won't run on the local machine. The following command builds a multi-architecture image:

docker buildx build --platform linux/amd64,linux/arm64 .

But it errors with:

ERROR: multiple platforms feature is currently not supported for docker driver. Please switch to a different driver (eg. "docker buildx create --use")

After some reading, the fix is to create a builder that uses the docker-container driver:

docker buildx create --name mybuilder --driver docker-container

After that, the multi-arch build works.
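
Putting the steps together, a sketch of the full multi-arch flow (the image name is a placeholder; a multi-platform build generally has to be pushed to a registry with --push rather than loaded into the local Docker engine):

docker buildx create --name mybuilder --driver docker-container --use
docker buildx build --platform linux/amd64,linux/arm64 \
  -t example.com/myorg/myimage:latest --push .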

MySQL complains the password is too weak when creating a user:

mysql> CREATE USER  'hardoop'@'%'  IDENTIFIED BY '123456';
ERROR 1819 (HY000): Your password does not satisfy the current policy requirements
mysql> SHOW VARIABLES LIKE 'validate_password%';
+--------------------------------------+--------+
| Variable_name | Value |
+--------------------------------------+--------+
| validate_password.check_user_name | ON |
| validate_password.dictionary_file | |
| validate_password.length | 8 |
| validate_password.mixed_case_count | 1 |
| validate_password.number_count | 1 |
| validate_password.policy | MEDIUM |
| validate_password.special_char_count | 1 |
+--------------------------------------+--------+
mysql> SET GLOBAL validate_password.length = 6;
Query OK, 0 rows affected (0.00 sec)

mysql> SET GLOBAL validate_password.number_count = 0;
Query OK, 0 rows affected (0.00 sec)
mysql> SET GLOBAL validate_password.policy = 0;
Query OK, 0 rows affected (0.00 sec)



mysql> CREATE USER 'hardoop'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

Install Hadoop and Hive on a MacBook:

brew install hadoop

brew install hive

Configure Hadoop

Hadoop 3.3.4 is installed.

echo "127.0.0.1 wanghongxing" >> /etc/hosts

The install directory is /usr/local/Cellar/hadoop/3.3.4/.

Go into /usr/local/Cellar/hadoop/3.3.4/ and create the working directories:

cd  /usr/local/Cellar/hadoop/3.3.4/
mkdir tmp
mkdir -p dfs/name
mkdir hadoop

Go into the libexec directory and edit the configuration files under etc/hadoop:

  • core-site.xml
  • hdfs-site.xml
  • mapred-site.xml
  • yarn-site.xml

Edit core-site.xml

Set Hadoop's temporary directory and the default filesystem; localhost:9000 means the local host. To use a remote host, put its IP address there instead, or, if you use its hostname, add a DNS mapping for it in /etc/hosts. Configure core-site.xml as follows:

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
  </property>

  <!-- directory for files Hadoop generates at runtime; create it yourself -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/Cellar/hadoop/3.3.4/tmp</value>
  </property>
</configuration>

Edit hdfs-site.xml

Change hdfs-site.xml as follows; note that the name and data paths must be replaced with your local paths:

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- allow non-root users to write files to HDFS -->
  <property>
    <name>dfs.permissions</name>
    <value>false</value> <!-- disable permission checks -->
  </property>
  <!-- replace with the local path where the name directory lives -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/usr/local/Cellar/hadoop/3.3.4/dfs/name</value>
  </property>
  <!-- create a local folder for Hadoop data and put its path here -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/usr/local/Cellar/hadoop/3.3.4/hadoop</value>
  </property>
</configuration>

Edit mapred-site.xml

The etc/hadoop directory may not contain mapred-site.xml, but it provides a mapred-site.xml.template. Rename that template to mapred-site.xml, then set yarn as the data-processing framework:

<configuration>
  <property>
    <!-- run MapReduce on YARN -->
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>

Edit yarn-site.xml

Configure YARN as the data-processing framework:

<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>localhost:8088</value>
  </property>
</configuration>

Format the NameNode:

$ hdfs namenode -format

Verify Hadoop:

$ sbin/start-all.sh

Access Hadoop in the browser

The default Hadoop web UI port is 9870 (50070 in older versions). Use the following URL to reach it:

http://localhost:9870/

Check all cluster applications

The default port for the cluster applications UI is 8088. Use this URL to access the service:

http://localhost:8088/

Restart

Stop, clear the temporary DFS data, and start again:


sbin/stop-all.sh

rm -rf tmp/dfs

sbin/start-all.sh

At this point, Hadoop looks fine.


Configure Hive

Add to ~/.bash_profile:

#Setting PATH for Hive

export HIVE_HOME=/usr/local/Cellar/hive/3.1.3/libexec

export PATH=$PATH:$HIVE_HOME/bin

Set up MySQL for the Hive metastore:

create database hivestore;

CREATE USER 'hardoop'@'%' IDENTIFIED BY '123456';

GRANT ALL PRIVILEGES ON *.* TO 'hardoop'@'%' WITH GRANT OPTION;

flush privileges;

Edit the Hive configuration file:

cd $HIVE_HOME/conf
cp hive-default.xml.template hive-site.xml
vim hive-site.xml
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hardoop</value> <!-- must match the MySQL user created above -->
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>123456</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/hivestore</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.cj.jdbc.Driver</value>
  </property>

  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/usr/local/Cellar/hive/3.1.3/libexec/iotmp</value>
  </property>
  <property>
    <name>hive.querylog.location</name>
    <value>/usr/local/Cellar/hive/3.1.3/libexec/iotmp</value>
  </property>
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/usr/local/Cellar/hive/3.1.3/libexec/iotmp</value>
  </property>

</configuration>

Download the MySQL connector

https://dev.mysql.com/downloads/connector/j/

For mysql-connector, choose the "Platform Independent" download. After unpacking, copy the jar file into /usr/local/Cellar/hive/3.1.3/libexec/lib.

Create an iotmp folder inside /usr/local/Cellar/hive/3.1.3/libexec/ (i.e. $HIVE_HOME).
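
A sketch of those two steps (the connector version and download path are placeholders; use whatever you actually downloaded):

cd /usr/local/Cellar/hive/3.1.3/libexec
mkdir -p iotmp
cp ~/Downloads/mysql-connector-j-8.0.33/mysql-connector-j-8.0.33.jar lib/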

Initialize the metastore schema

In the /usr/local/Cellar/hive/3.1.3/libexec/bin directory, run:

schematool -initSchema -dbType mysql

Check the schema info:

schematool -dbType mysql -info

Start Hive:

wanghongxing:~ whx$ hive
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/Cellar/hive/3.1.3/libexec/lib/log4j-slf4j-impl-2.17.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/Cellar/hadoop/3.3.4/libexec/share/hadoop/common/lib/slf4j-reload4j-1.7.36.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Hive Session ID = 8a401692-9f09-41f6-babb-0ac77fd8eb16

Logging initialized using configuration in jar:file:/usr/local/Cellar/hive/3.1.3/libexec/lib/hive-common-3.1.3.jar!/hive-log4j2.properties Async: true
Hive Session ID = 380b0568-e219-453f-abf8-12eb4d7cf331
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive> show databases ;
OK
default
Time taken: 0.798 seconds, Fetched: 1 row(s)
hive>


After all this fiddling I went back over Hadoop, and startup kept failing. Since my MacBook has an ARM chip, I decided to do it with Docker instead.

Cloned https://github.com/big-data-europe/docker-hadoop.

Looking at its image-build script, running make build builds all the images:

DOCKER_NETWORK = docker-hadoop_default
ENV_FILE = hadoop.env
current_branch := $(shell git rev-parse --abbrev-ref HEAD)
build:
	docker build -t bde2020/hadoop-base:$(current_branch) ./base
	docker build -t bde2020/hadoop-namenode:$(current_branch) ./namenode
	docker build -t bde2020/hadoop-datanode:$(current_branch) ./datanode
	docker build -t bde2020/hadoop-resourcemanager:$(current_branch) ./resourcemanager
	docker build -t bde2020/hadoop-nodemanager:$(current_branch) ./nodemanager
	docker build -t bde2020/hadoop-historyserver:$(current_branch) ./historyserver
	docker build -t bde2020/hadoop-submit:$(current_branch) ./submit

wordcount:
	docker build -t hadoop-wordcount ./submit
	docker run --network ${DOCKER_NETWORK} --env-file ${ENV_FILE} bde2020/hadoop-base:$(current_branch) hdfs dfs -mkdir -p /input/
	docker run --network ${DOCKER_NETWORK} --env-file ${ENV_FILE} bde2020/hadoop-base:$(current_branch) hdfs dfs -copyFromLocal -f /opt/hadoop-3.2.3/README.txt /input/
	docker run --network ${DOCKER_NETWORK} --env-file ${ENV_FILE} hadoop-wordcount
	docker run --network ${DOCKER_NETWORK} --env-file ${ENV_FILE} bde2020/hadoop-base:$(current_branch) hdfs dfs -cat /output/*
	docker run --network ${DOCKER_NETWORK} --env-file ${ENV_FILE} bde2020/hadoop-base:$(current_branch) hdfs dfs -rm -r /output
	docker run --network ${DOCKER_NETWORK} --env-file ${ENV_FILE} bde2020/hadoop-base:$(current_branch) hdfs dfs -rm -r /input

Also, the Dockerfile builds from Debian, where installing an arm64 JDK is a hassle, so I changed the base image to openjdk:11, removed the JDK installation steps, and upgraded Hadoop to 3.2.3 along the way:

FROM openjdk:11

MAINTAINER Ivan Ermilov <ivan.s.ermilov@gmail.com>
MAINTAINER Giannis Mouchakis <gmouchakis@iit.demokritos.gr>

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
net-tools \
curl \
netcat \
gnupg \
libsnappy-dev \
&& rm -rf /var/lib/apt/lists/*

# ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

RUN curl -O https://dist.apache.org/repos/dist/release/hadoop/common/KEYS

RUN gpg --import KEYS

ENV HADOOP_VERSION 3.2.3
ENV HADOOP_URL https://www.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz

RUN set -x \
&& curl -fSL "$HADOOP_URL" -o /tmp/hadoop.tar.gz \
&& curl -fSL "$HADOOP_URL.asc" -o /tmp/hadoop.tar.gz.asc \
&& gpg --verify /tmp/hadoop.tar.gz.asc \
&& tar -xvf /tmp/hadoop.tar.gz -C /opt/ \
&& rm /tmp/hadoop.tar.gz*

RUN ln -s /opt/hadoop-$HADOOP_VERSION/etc/hadoop /etc/hadoop

RUN mkdir /opt/hadoop-$HADOOP_VERSION/logs

RUN mkdir /hadoop-data

ENV HADOOP_HOME=/opt/hadoop-$HADOOP_VERSION
ENV HADOOP_CONF_DIR=/etc/hadoop
ENV MULTIHOMED_NETWORK=1
ENV USER=root
ENV PATH $HADOOP_HOME/bin/:$PATH

ADD entrypoint.sh /entrypoint.sh

RUN chmod a+x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]

With this, Hadoop works fine, but adding Hive on top keeps failing.

So I took out the Intel MacBook and did it there.

Requirements:

The cloud environment has several ECS instances used as development servers, plus RDS, MQ, ES, MongoDB, and Nacos. The team develops Spring Cloud microservices, and developers need to be able to reach the cloud components at any time without compromising security.

Options:

1. Buy the cloud provider's VPN gateway: just costs money.

2. Install a VPN on an ECS instance ourselves and have each developer connect through it.

The rest of this section covers option 2.

Network layout:

The cloud VPC subnet is 10.0.0.0/24.

Server 1: runs ES, MongoDB, Doris, etc.; IP 10.0.0.11

Server 2: runs Nacos, Redis, and Docker; IP 10.0.0.10

Server 3: Docker; IP 10.0.0.12

Plan:

1. Install OpenVPN on server 1, with client addresses in 10.8.0.0/24, so that all VPN clients can reach servers 2 and 3 directly;

2. Servers 1, 2, and 3 can reach all VPN clients directly;

3. VPN clients can reach the services running in Docker containers on servers 2 and 3.

Item 1 is basic OpenVPN functionality.

Item 2 requires the OpenVPN server to push routes to the VPN clients.

Item 3 requires the containers on servers 2 and 3 that need to be reachable to join a dedicated Docker network, and the OpenVPN server to push a route for each Docker network to the VPN clients.

Docker networks first

Create a dedicated Docker network on server 2 and server 3:

Server 2: docker network create --subnet=10.10.0.0/24 cem-network

Server 3: docker network create --subnet=10.12.0.0/24 cem-network

Then every container on these two servers that must be reachable from VPN clients has to be created on cem-network, as in the sketch below.
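
A minimal sketch of attaching a container to the existing cem-network from docker-compose (the service name and image are placeholders):

# docker-compose.yml fragment on server 2
services:
  demo-redis:
    image: redis:6
    networks:
      - cem-network

networks:
  cem-network:
    external: true    # reuse the pre-created cem-network instead of creating a new one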

Install the OpenVPN server

Start firewalld first, since the firewall is needed for forwarding (iptables would also work, but I've rarely used it in recent years):

sudo systemctl enable firewalld
sudo systemctl start firewalld

Install the OpenVPN packages:

yum -y install epel-release
yum install openvpn
wget https://github.com/OpenVPN/easy-rsa-old/archive/2.3.3.tar.gz
tar xfz 2.3.3.tar.gz
mkdir /etc/openvpn/easy-rsa
cp -rf easy-rsa-old-2.3.3/easy-rsa/2.0/* /etc/openvpn/easy-rsa
cp /usr/share/doc/openvpn-2.4.12/sample/sample-config-files/server.conf /etc/openvpn

Edit the /etc/openvpn/server.conf configuration file; roughly the following changes are needed.

Then add the routes to the 10.10.0.0/24 (and 10.12.0.0/24) container networks to the OpenVPN config:

client-to-client
# comment out the following line
;tls-auth ta.key 0
# and add this instead
tls-crypt myvpn.tlsauth

# push routes to the cloud servers
# everything in 10.0.0.0/24 goes through the OpenVPN server
push "route 10.0.0.0 255.255.255.0"
# traffic for the 10.10.0.0/24 containers is routed via 10.0.0.10
push "route 10.10.0.0 255.255.255.0 10.0.0.10 1"
# traffic for the 10.12.0.0/24 containers is routed via 10.0.0.12
push "route 10.12.0.0 255.255.255.0 10.0.0.12 1"
openvpn --genkey --secret /etc/openvpn/myvpn.tlsauth
mkdir /etc/openvpn/easy-rsa/keys
cd /etc/openvpn/easy-rsa
source ./vars
./clean-all
./build-ca
./build-key-server server
./build-dh
cd /etc/openvpn/easy-rsa/keys
cp dh2048.pem ca.crt server.crt server.key /etc/openvpn
cd /etc/openvpn/easy-rsa

./build-key wanghongxing

Then adjust the firewall configuration:



firewall-cmd --zone=public --add-port=1194/tcp --permanent
firewall-cmd --zone=public --add-port=1194/udp --permanent
firewall-cmd --zone=trusted --add-service openvpn --permanent

firewall-cmd --list-services --zone=trusted
firewall-cmd --add-masquerade
firewall-cmd --permanent --add-masquerade
firewall-cmd --query-masquerade
firewall-cmd --permanent --add-interface=tun0
firewall-cmd --permanent --add-service=openvpn
firewall-cmd --permanent --direct --passthrough ipv4 -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE
firewall-cmd --reload

echo "net.ipv4.ip_forward = 1" >>/etc/sysctl.conf
systemctl restart openvpn@server.service

Create the VPN client files

Copy the following files down to the client machine:

/etc/openvpn/ca.crt 
/etc/openvpn/myvpn.tlsauth
/etc/openvpn/easy-rsa/keys/wanghongxing.crt
/etc/openvpn/easy-rsa/keys/wanghongxing.csr
/etc/openvpn/easy-rsa/keys/wanghongxing.key

Then create whx.ovpn:

client
dev tun
proto udp
remote 114.116.201.xxx
resolv-retry infinite
nobind
persist-key
persist-tun
ca ca.crt
cert wanghongxing.crt
key wanghongxing.key
remote-cert-tls server
tls-crypt myvpn.tlsauth
verb 3

Download an OpenVPN client

Mac users can use Tunnelblick; Windows users download the corresponding Windows client.

Then import whx.ovpn.

Set routes on servers 2 and 3

On servers 2 and 3, add a route back to the VPN clients:

ip route add 10.8.0.0/24 via 10.0.0.11 dev eth0
# so it survives reboots
echo "10.8.0.0/24 via 10.0.0.11 dev eth0" >> /etc/sysconfig/network-scripts/route-eth0

On server 2, add a route to the containers on server 3:

ip route add 10.12.0.0/24 via 10.0.0.12 dev eth0
echo "10.12.0.0/24 via 10.0.0.12 dev eth0" >> /etc/sysconfig/network-scripts/route-eth0

On server 3, add a route to the containers on server 2:

ip route add 10.10.0.0/24 via 10.0.0.10 dev eth0
echo "10.10.0.0/24 via 10.0.0.10 dev eth0" >> /etc/sysconfig/network-scripts/route-eth0

On server 1, add routes to the containers on servers 2 and 3:

ip route add 10.12.0.0/24 via 10.0.0.12 dev eth0
ip route add 10.10.0.0/24 via 10.0.0.10 dev eth0
echo "10.12.0.0/24 via 10.0.0.12 dev eth0" >> /etc/sysconfig/network-scripts/route-eth0
echo "10.10.0.0/24 via 10.0.0.10 dev eth0" >> /etc/sysconfig/network-scripts/route-eth0

ip route add 172.18.0.0/24 via 10.0.0.10 dev eth0

firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=443/tcp --permanent
firewall-cmd --zone=public --add-port=8848/tcp --permanent
firewall-cmd --zone=public --add-port=9848/tcp --permanent
firewall-cmd --permanent --zone=trusted --change-interface=docker0

firewall-cmd --zone=public --add-port=6379/tcp --permanent
firewall-cmd --zone=public --add-port=9876/tcp --permanent

docker network create --subnet=10.10.0.0/24 cem-network

Adding a route manually on Linux:

ip route add 10.10.0.0/24 via 10.0.0.10 dev eth0

Adding a route manually on macOS:

sudo route -n add -net 10.10.0.0/24 10.0.0.10

To make Spring Cloud microservices register with Nacos using a specific IP range, set preferred-networks in the bootstrap config. For example, for developers on the VPN network:

spring:
  cloud:
    inetutils:
      preferred-networks:
        - 10.8

And for services running in the cem containers, for example:

spring:
  cloud:
    inetutils:
      preferred-networks:
        - 10.10

Install Doris

yum install java-1.8.0-openjdk.x86_64 java-1.8.0-openjdk-devel.x86_64

cd /data/application

wget https://mirrors.tuna.tsinghua.edu.cn/apache/doris/1.1/1.1.5-rc02/apache-doris-fe-1.1.5-bin.tar.gz
tar zxf apache-doris-fe-1.1.5-bin.tar.gz

wget https://mirrors.tuna.tsinghua.edu.cn/apache/doris/1.1/1.1.5-rc02/apache-doris-be-1.1.5-bin-x86_64.tar.gz
tar zxf apache-doris-be-1.1.5-bin-x86_64.tar.gz




FE

cd apache-doris-fe-1.1.5-bin

echo "priority_networks=10.0.0.0/24" >> conf/fe.conf
./bin/start_fe.sh --daemon

Check whether it started successfully:

curl http://127.0.0.1:8030/api/bootstrap
{"msg":"success",
"code":0,
"data":{"replayedJournalId":0,"queryPort":0,"rpcPort":0,"version":""},
"count":0}


rpm -ivh https://repo.mysql.com//mysql57-community-release-el7-11.noarch.rpm
yum install mysql-community-client.x86_64

mysql -uroot -P9030 -h127.0.0.1

show frontends\G


cd /data/application/apache-doris-be-1.1.5-bin-x86_64
echo "priority_networks=10.0.0.0/24" >> conf/be.conf
sysctl -w vm.max_map_count=2000000
bin/start_be.sh --daemon


Add the BE to the FE:


mysql -uroot -P9030 -h127.0.0.1

ALTER SYSTEM ADD BACKEND "10.0.0.11:9050";
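
Then, still in the MySQL session, the BE registration can be checked with:

SHOW BACKENDS\G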

Set the root password:

SET PASSWORD FOR 'root' = PASSWORD('7Kf8o_Wqid8HVJ6h');
Sample rows for a test load (e.g. saved as /tmp/test.csv):

10000,2017-10-01,北京,20,0,2017-10-01 06:00:00,20,10,10
10000,2017-10-01,北京,20,0,2017-10-01 07:00:00,15,2,2
10001,2017-10-01,北京,30,1,2017-10-01 17:05:45,2,22,22
10002,2017-10-02,上海,20,1,2017-10-02 12:59:12,200,5,5
10003,2017-10-02,广州,32,0,2017-10-02 11:20:00,30,11,11
10004,2017-10-01,深圳,35,0,2017-10-01 10:00:15,100,3,3
10004,2017-10-03,深圳,35,0,2017-10-03 10:20:22,11,6,6
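
A minimal sketch of loading these rows, assuming a hypothetical database demo and table example_tbl matching the column layout above (check the Doris docs for the exact DDL and stream-load syntax):

CREATE DATABASE demo;
USE demo;
CREATE TABLE example_tbl (
    user_id BIGINT,
    dt DATE,
    city VARCHAR(20),
    age SMALLINT,
    sex TINYINT,
    last_visit_date DATETIME,
    cost BIGINT,
    max_dwell_time INT,
    min_dwell_time INT
)
DUPLICATE KEY(user_id, dt)
DISTRIBUTED BY HASH(user_id) BUCKETS 1
PROPERTIES ("replication_num" = "1");

Then load the CSV through the stream load HTTP API on the FE (password as set above):

curl --location-trusted -u root:7Kf8o_Wqid8HVJ6h \
    -H "label:test_load_1" -H "column_separator:," \
    -T /tmp/test.csv \
    http://127.0.0.1:8030/api/demo/example_tbl/_stream_load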

Maven: build a single module

mvn package -pl guyuai-module-cem/guyuai-module-cem-biz -am -Dmaven.test.skip=true