Ubuntu tutorial: installing OpenStack Juno on Ubuntu 14.04 Server


Source: http://czbinghe.blog.51cto.com/9824423/1622638

The system used is Ubuntu 14.04 Server.

Before installing, set up the OpenStack package repository on all servers:

apt-get install python-software-properties

apt-get install software-properties-common

add-apt-repository cloud-archive:juno

apt-get update && apt-get dist-upgrade

Install the time synchronization service:

apt-get install -y ntp

vim /etc/ntp.conf

Comment out the other server lines and add:

server 10.0.0.11

Restart the NTP service:

service ntp restart
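To confirm that a node is actually syncing time from the controller, you can check the peer list (the controller entry should eventually show a non-zero reach value). This check is an optional addition, not part of the original walkthrough:

ntpq -p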

IP address plan

controller

192.168.2.11

10.0.0.11

network

192.168.2.22

10.0.0.22

10.0.1.22

compute

192.168.2.33 (the external network can be disconnected once installation and configuration are finished)

10.0.0.33

10.0.1.33

Detailed installation and configuration procedure

Network configuration

controller server

vim /etc/hostname and write:

controller

vim /etc/hosts and add:

10.0.0.11 controller

10.0.0.22 network

10.0.0.33 compute

vim /etc/network/interfaces

auto lo

iface lo inet loopback

# the primary network interface

auto eth0

iface eth0 inet static

address 192.168.2.11

netmask 255.255.0.0

network 192.168.0.0

broadcast 192.168.255.255

gateway 192.168.1.1

# dns-* options are implemented by the resolvconf package, if installed

dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

address 10.0.0.11

netmask 255.255.255.0

gateway 10.0.0.1

network server

vim /etc/hostname and write:

network

vim /etc/hosts and add:

10.0.0.11 controller

10.0.0.22 network

10.0.0.33 compute

root@network:~# vim /etc/network/interfaces

# this file describes the network interfaces available on your system

# and how to activate them. for more information, see interfaces(5).

# the loopback network interface

auto lo

iface lo inet loopback

# the primary network interface

auto eth0

iface eth0 inet static

address 192.168.2.22

netmask 255.255.0.0

network 192.168.0.0

broadcast 192.168.255.255

gateway 192.168.1.1

# dns-* options are implemented by the resolvconf package, if installed

dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

address 10.0.0.22

netmask 255.255.255.0

gateway 10.0.0.1

auto eth2

iface eth2 inet static

address 10.0.1.22

netmask 255.255.255.0

compute server

vim /etc/hostname and write:

compute

vim /etc/hosts and add:

10.0.0.11 controller

10.0.0.22 network

10.0.0.33 compute

root@compute:~# vim /etc/network/interfaces

auto lo

iface lo inet loopback

auto eth0

iface eth0 inet static

address 192.168.2.33

netmask 255.255.0.0

network 192.168.0.0

broadcast 192.168.255.255

gateway 192.168.1.1

# dns-* options are implemented by the resolvconf package, if installed

dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

address 10.0.0.33

netmask 255.255.255.0

gateway 10.0.0.1

auto eth2

iface eth2 inet static

address 10.0.1.33

netmask 255.255.255.0

Note: the DNS server for all of the above nodes is 192.168.1.1.
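After editing /etc/network/interfaces on a node, the new addressing still has to be applied. A reboot is the simplest way; alternatively, from the console, something along these lines usually works (a suggestion, not part of the original procedure):

ifdown eth0 && ifup eth0
ifdown eth1 && ifup eth1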

Installing the OpenStack components on the controller server

Install the MySQL database:

apt-get install -y mysql-server-5.6 python-mysqldb

Edit the MySQL configuration file:

vi /etc/mysql/my.cnf

[mysqld]

default-storage-engine = innodb

innodb_file_per_table

collation-server = utf8_general_ci

init-connect = 'set names utf8'

character-set-server = utf8

#bind-address = 127.0.0.1

bind-address = 0.0.0.0

Restart the database:

service mysql restart

Remove the anonymous database users.

Run the following in a terminal:

mysql_install_db

mysql_secure_installation
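To double-check that the anonymous accounts were really removed, you can list the accounts MySQL knows about; any row with an empty User column would be an anonymous user. This verification is an optional extra:

mysql -u root -p -e "SELECT User, Host FROM mysql.user;"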

Install the RabbitMQ (message queue) service:

apt-get install -y rabbitmq-server
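This guide keeps RabbitMQ's default guest account and password, which is why no rabbit_password appears in the service configurations later. If you decide to change it, something like the following works (RABBIT_PASS is a placeholder), but you would then also need to add rabbit_password = RABBIT_PASS to every service that talks to RabbitMQ:

rabbitmqctl change_password guest RABBIT_PASS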

Install Keystone:

apt-get install -y keystone

Create the Keystone database (enter MySQL with mysql -u root -p):

create database keystone;

grant all privileges on keystone.* to 'keystone'@'localhost' identified by 'keystone_dbpass';

grant all privileges on keystone.* to 'keystone'@'%' identified by 'keystone_dbpass';

exit;

Remove the SQLite database:

rm /var/lib/keystone/keystone.db

Configure Keystone

Edit /etc/keystone/keystone.conf:

[DEFAULT]

admin_token=admin

log_dir=/var/log/keystone

[database]

#connection=sqlite:////var/lib/keystone/keystone.db

connection = mysql://keystone:keystone_dbpass@10.0.0.11/keystone

Restart Keystone:

service keystone restart

Sync the Keystone database:

keystone-manage db_sync
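Keystone's SQL token backend accumulates expired tokens over time. An optional housekeeping step, not in the original write-up, is to flush them periodically from cron:

(crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone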

Set the environment variables:

export OS_SERVICE_TOKEN=admin

export OS_SERVICE_ENDPOINT=http://10.0.0.11:35357/v2.0

Create a user with administrator privileges:

root@controller:~# keystone user-create --name=admin --pass=admin_pass --email=admin@domain.com

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| email | admin@domain.com |

| enabled | true |

| id | 61991b4c9abe46968b08c6d3268e8b25 |

| name | admin |

| username | admin |

+----------+----------------------------------+

root@controller:~# keystone role-create --name=admin

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| id | 14d9aa53cfd7404ea5ecdc8c6ff96bb3 |

| name | admin |

+----------+----------------------------------+

root@controller:~# keystone role-create --name=_member_

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| id | 69d86b6c21d54fc3848b30d8a7afa6d6 |

| name | _member_ |

+----------+----------------------------------+

root@controller:~# keystone tenant-create --name=admin --description="admin tenant"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | admin tenant |

| enabled | true |

| id | 9474847b08264433b623233c85b7b6de |

| name | admin |

+-------------+----------------------------------+

root@controller:~# keystone user-role-add --user=admin --tenant=admin --role=admin

root@controller:~# keystone user-role-add --user=admin --role=_member_ --tenant=admin

Create a regular user:

root@controller:~# keystone user-create --name=demo --pass=demo_pass --email=demo@domain.com

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| email | demo@domain.com |

| enabled | true |

| id | f40209d709564e5fbe04dc4659f4ee72 |

| name | demo |

| username | demo |

+----------+----------------------------------+

root@controller:~# keystone tenant-create --name=demo --description="demo tenant"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | demo tenant |

| enabled | true |

| id | 5e3aa75b5bce4723a755e356ef22ad26 |

| name | demo |

+-------------+----------------------------------+

root@controller:~# keystone user-role-add --user=demo --role=_member_ --tenant=demo

Create the service tenant:

root@controller:~# keystone tenant-create --name=service --description="service tenant"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | service tenant |

| enabled | true |

| id | 4fd53777c8f84c72b09ef025ab45977d |

| name | service |

+-------------+----------------------------------+

Define the service and its API endpoint:

root@controller:~# keystone service-create --name=keystone --type=identity --description="openstack identity"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | openstack identity |

| enabled | true |

| id | 6b6023376cc040e8be26a57815f17b87 |

| name | keystone |

| type | identity |

+-------------+----------------------------------+

Create the endpoint:

root@controller:~# keystone endpoint-create \
> --service-id=$(keystone service-list | awk '/ identity / {print $2}') \
> --publicurl=http://192.168.2.11:5000/v2.0 \
> --internalurl=http://10.0.0.11:5000/v2.0 \
> --adminurl=http://10.0.0.11:35357/v2.0

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| adminurl | http://10.0.0.11:35357/v2.0 |

| id | 0dcae7b8deb9437996c7c7e0ed0b4086 |

| internalurl | http://10.0.0.11:5000/v2.0 |

| publicurl | http://192.168.2.11:5000/v2.0 |

| region | regionone |

| service_id | 6b6023376cc040e8be26a57815f17b87 |

+-------------+----------------------------------+

Verify Keystone

Use the commands below to check that the Keystone initialization is working.

Set up environment variables by creating two files, creds and admin_creds:

cat <<EOF >>/root/creds
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL="http://192.168.2.11:5000/v2.0/"
EOF

cat <<EOF >>/root/admin_creds
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://10.0.0.11:35357/v2.0
EOF

The environment variables must be set before the operations below can be performed.

Clear the OS_SERVICE_TOKEN and OS_SERVICE_ENDPOINT environment variables; if they are left set, warnings will appear:

unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT

Load the environment variables:

source creds

Now the following commands work:

root@controller:~# keystone user-list

+----------------------------------+-------+---------+------------------+

| id | name | enabled | email |

+----------------------------------+-------+---------+------------------+

| 61991b4c9abe46968b08c6d3268e8b25 | admin | true | admin@domain.com |

| f40209d709564e5fbe04dc4659f4ee72 | demo | true | demo@domain.com |

+----------------------------------+-------+---------+------------------+

root@controller:~# keystone role-list

+----------------------------------+----------+

| id | name |

+----------------------------------+----------+

| 69d86b6c21d54fc3848b30d8a7afa6d6 | _member_ |

| 14d9aa53cfd7404ea5ecdc8c6ff96bb3 | admin |

+----------------------------------+----------+

root@controller:~# keystone tenant-list

+----------------------------------+---------+---------+

| id | name | enabled |

+----------------------------------+---------+---------+

| 9474847b08264433b623233c85b7b6de | admin | true |

| 5e3aa75b5bce4723a755e356ef22ad26 | demo | true |

| 4fd53777c8f84c72b09ef025ab45977d | service | true |

+----------------------------------+---------+---------+
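As one more optional sanity check, request a token with the admin credentials loaded; if this returns an id and the expected tenant, the service and endpoint definitions are working:

keystone token-get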

Installing and configuring Glance

apt-get install -y glance python-glanceclient

Create the database (mysql -u root -p):

create database glance;

grant all privileges on glance.* to 'glance'@'localhost' identified by 'glance_dbpass';

grant all privileges on glance.* to 'glance'@'%' identified by 'glance_dbpass';

exit;

Create the Glance user and service in Keystone:

root@controller:~# keystone user-create --name=glance --pass=service_pass --email=glance@domain.com

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| email | glance@domain.com |

| enabled | true |

| id | 9fa6993da7944a59b342a73a6f18728a |

| name | glance |

| username | glance |

+----------+----------------------------------+

root@controller:~# keystone user-role-add --user=glance --tenant=service --role=admin

Set the endpoint:

root@controller:~# keystone service-create --name=glance --type=image --description="openstack image service"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | openstack image service |

| enabled | true |

| id | d3d6fb3384db4ce9ad3423817b52bac9 |

| name | glance |

| type | image |

+-------------+----------------------------------+

root@controller:~# keystone endpoint-create \
> --service-id=$(keystone service-list | awk '/ image / {print $2}') \
> --publicurl=http://192.168.2.11:9292 \
> --internalurl=http://10.0.0.11:9292 \
> --adminurl=http://10.0.0.11:9292

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| adminurl | http://10.0.0.11:9292 |

| id | 0859727be85d473391c935c3f52ddddf |

| internalurl | http://10.0.0.11:9292 |

| publicurl | http://192.168.2.11:9292 |

| region | regionone |

| service_id | d3d6fb3384db4ce9ad3423817b52bac9 |

+-------------+----------------------------------+

Edit the Glance configuration files:

vim /etc/glance/glance-api.conf

[database]

connection = mysql://glance:glance_dbpass@10.0.0.11/glance

[DEFAULT]

rpc_backend = rabbit

rabbit_host = 10.0.0.11

[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass

[paste_deploy]

flavor = keystone

vim /etc/glance/glance-registry.conf

[database]

# the file name to use with sqlite (string value)

#sqlite_db = /var/lib/glance/glance.sqlite

connection = mysql://glance:glance_dbpass@10.0.0.11/glance

[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass

[paste_deploy]

flavor = keystone

Restart the services:

service glance-api restart; service glance-registry restart

Initialize the Glance database:

glance-manage db_sync

Load the environment variables:

source creds

Upload a test image:

root@controller:~# glance image-create --name "cirros-0.3.2-x86_64" --is-public true \
> --container-format bare --disk-format qcow2 \
> --location http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img

+------------------+--------------------------------------+

| property | value |

+------------------+--------------------------------------+

| checksum | none |

| container_format | bare |

| created_at | 2015-03-20t08:02:56 |

| deleted | false |

| deleted_at | none |

| disk_format | qcow2 |

| id | 5dbfecab-9828-4492-88bb-c0dd6aa6d75c |

| is_public | true |

| min_disk | 0 |

| min_ram | 0 |

| name | cirros-0.3.2-x86_64 |

| owner | 9474847b08264433b623233c85b7b6de |

| protected | false |

| size | 13200896 |

| status | active |

| updated_at | 2015-03-20t08:02:57 |

| virtual_size | none |

+------------------+--------------------------------------+
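If the controller has no internet access, an alternative (not shown in the original) is to download the CirrOS image elsewhere and upload it from a local file with --file instead of --location; the file name below is just an example:

glance image-create --name "cirros-0.3.2-x86_64" --is-public true --container-format bare --disk-format qcow2 --file cirros-0.3.3-x86_64-disk.img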

List the images:

root@controller:~# glance image-list

+--------------------------------------+---------------------+-------------+------------------+----------+--------+

| id | name | disk format | container format | size | status |

+--------------------------------------+---------------------+-------------+------------------+----------+--------+

| 5dbfecab-9828-4492-88bb-c0dd6aa6d75c | cirros-0.3.2-x86_64 | qcow2 | bare | 13200896 | active |

+--------------------------------------+---------------------+-------------+------------------+----------+--------+

Installing and configuring the Nova components

apt-get install -y nova-api nova-cert nova-conductor nova-consoleauth nova-novncproxy nova-scheduler python-novaclient

Create the Nova database:

mysql -u root -p

create database nova;

grant all privileges on nova.* to 'nova'@'localhost' identified by 'nova_dbpass';

grant all privileges on nova.* to 'nova'@'%' identified by 'nova_dbpass';

exit;

Create the Nova user and role in Keystone:

root@controller:~# keystone user-create --name=nova --pass=service_pass --email=nova@domain.com

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| email | nova@domain.com |

| enabled | true |

| id | cc25a28979b0467cac7a33426b8180f7 |

| name | nova |

| username | nova |

+----------+----------------------------------+

root@controller:~# keystone user-role-add --user=nova --tenant=service --role=admin

Register the service and set the endpoint:

root@controller:~# keystone service-create --name=nova --type=compute --description="openstack compute"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | openstack compute |

| enabled | true |

| id | 7bb1f0e64e3b4ef8b0408902261b2b37 |

| name | nova |

| type | compute |

+-------------+----------------------------------+

root@controller:~# keystone endpoint-create \
> --service-id=$(keystone service-list | awk '/ compute / {print $2}') \
> --publicurl=http://192.168.2.11:8774/v2/%\(tenant_id\)s \
> --internalurl=http://10.0.0.11:8774/v2/%\(tenant_id\)s \
> --adminurl=http://10.0.0.11:8774/v2/%\(tenant_id\)s

+-------------+-------------------------------------------+

| property | value |

+-------------+-------------------------------------------+

| adminurl | http://10.0.0.11:8774/v2/%(tenant_id)s |

| id | 24fc3bf020084040ba6a58d60c0b1719 |

| internalurl | http://10.0.0.11:8774/v2/%(tenant_id)s |

| publicurl | http://192.168.2.11:8774/v2/%(tenant_id)s |

| region | regionone |

| service_id | 7bb1f0e64e3b4ef8b0408902261b2b37 |

+-------------+-------------------------------------------+

Configure the Nova configuration file:

vim /etc/nova/nova.conf

Below is my complete nova.conf:

[DEFAULT]

dhcpbridge_flagfile=/etc/nova/nova.conf

dhcpbridge=/usr/bin/nova-dhcpbridge

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/var/lock/nova

force_dhcp_release=true

iscsi_helper=tgtadm

libvirt_use_virtio_for_bridges=true

connection_type=libvirt

root_helper=nova-rootwrap /etc/nova/rootwrap.conf

verbose=true

ec2_private_dns_show_ip=true

api_paste_config=/etc/nova/api-paste.ini

volumes_path=/var/lib/nova/volumes

enabled_apis=ec2,osapi_compute,metadata

rpc_backend = rabbit

rabbit_host = 10.0.0.11

my_ip = 10.0.0.11

vncserver_listen = 10.0.0.11

vncserver_proxyclient_address = 10.0.0.11

auth_strategy = keystone

[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = nova

admin_password = service_pass

[database]

connection = mysql://nova:nova_dbpass@10.0.0.11/nova

Remove the SQLite database:

rm /var/lib/nova/nova.sqlite

Initialize the Nova database:

nova-manage db sync

Restart the Nova services:

service nova-api restart

service nova-cert restart

service nova-conductor restart

service nova-consoleauth restart

service nova-novncproxy restart

service nova-scheduler restart

Check the Nova service status:

root@controller:~# nova-manage service list

binary host zone status state updated_at

nova-cert controller internal enabled :-) 2015-03-20 08:24:17

nova-consoleauth controller internal enabled :-) 2015-03-20 08:24:17

nova-conductor controller internal enabled :-) 2015-03-20 08:24:17

nova-scheduler controller internal enabled :-) 2015-03-20 08:24:17

The smiley faces show that all the services are running.

Installing and configuring the Neutron components

apt-get install -y neutron-server neutron-plugin-ml2

Create the Neutron database:

mysql -u root -p

create database neutron;

grant all privileges on neutron.* to neutron@'localhost' identified by 'neutron_dbpass';

grant all privileges on neutron.* to neutron@'%' identified by 'neutron_dbpass';

exit;

Create the Neutron user and role in Keystone:

root@controller:~# keystone user-create --name=neutron --pass=service_pass --email=neutron@domain.com

+----------+----------------------------------+

| property | value |

+----------+----------------------------------+

| email | neutron@domain.com |

| enabled | true |

| id | 322f0a1d2c7e416abf0e118e50625443 |

| name | neutron |

| username | neutron |

+----------+----------------------------------+

root@controller:~# keystone user-role-add --user=neutron --tenant=service --role=admin

Register the service and endpoint:

root@controller:~# keystone service-create --name=neutron --type=network --description="openstack networking"

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| description | openstack networking |

| enabled | true |

| id | e3d179a7b9be42ba982c79cd652a7be8 |

| name | neutron |

| type | network |

+-------------+----------------------------------+

root@controller:~# keystone endpoint-create \
> --service-id=$(keystone service-list | awk '/ network / {print $2}') \
> --publicurl=http://192.168.2.11:9696 \
> --internalurl=http://10.0.0.11:9696 \
> --adminurl=http://10.0.0.11:9696

+-------------+----------------------------------+

| property | value |

+-------------+----------------------------------+

| adminurl | http://10.0.0.11:9696 |

| id | 8b968c25d8324bb28125604a21c64f54 |

| internalurl | http://10.0.0.11:9696 |

| publicurl | http://192.168.2.11:9696 |

| region | regionone |

| service_id | e3d179a7b9be42ba982c79cd652a7be8 |

+-------------+----------------------------------+

Get the nova_admin_tenant_id:

root@controller:~# keystone tenant-list | awk '/ service / { print $2 }'

4fd53777c8f84c72b09ef025ab45977d
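Instead of copying the tenant id by hand into neutron.conf, you can capture it in a shell variable first; this is merely a convenience:

SERVICE_TENANT_ID=$(keystone tenant-list | awk '/ service / { print $2 }')
echo "nova_admin_tenant_id = ${SERVICE_TENANT_ID}"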

Edit the Neutron configuration file:

vim /etc/neutron/neutron.conf

[DEFAULT]

# example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router,lbaas

# auth_strategy = keystone

auth_strategy = keystone

# allow_overlapping_ips = false

allow_overlapping_ips = true

rpc_backend = neutron.openstack.common.rpc.impl_kombu

rabbit_host = 10.0.0.11

notification_driver = neutron.openstack.common.notifier.rpc_notifier

# ======== neutron nova interactions ==========

# send notification to nova when port status is active.

notify_nova_on_port_status_changes = true

# send notifications to nova when port data (fixed_ips/floatingips) change

# so nova can update it's cache.

notify_nova_on_port_data_changes = true

# url for connection to nova (only supports one nova region currently).

nova_url = http://10.0.0.11:8774/v2

# name of nova region to use. useful if keystone manages more than one region

# nova_region_name =

# username for connection to nova in admin context

nova_admin_username = nova

# the uuid of the admin nova tenant

nova_admin_tenant_id = 4fd53777c8f84c72b09ef025ab45977d

# password for connection to nova in admin context.

nova_admin_password = service_pass

# authorization url for connection to nova in admin context.

nova_admin_auth_url = http://10.0.0.11:35357/v2.0

[keystone_authtoken]

#auth_host = 127.0.0.1

#auth_port = 35357

#auth_protocol = http

#admin_tenant_name = %service_tenant_name%

#admin_user = %service_user%

#admin_password = %service_password%

#signing_dir = $state_path/keystone-signing

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass

[database]

#connection = sqlite:////var/lib/neutron/neutron.sqlite

connection = mysql://neutron:neutron_dbpass@10.0.0.11/neutron

Configure the Layer 2 (ML2) networking component:

vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch

[ml2_type_gre]

tunnel_id_ranges = 1:1000

[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = true

Configure Nova to use Neutron:

vim /etc/nova/nova.conf

Add the following in the [DEFAULT] section:

network_api_class=nova.network.neutronv2.api.API

neutron_url=http://10.0.0.11:9696

neutron_auth_strategy=keystone

neutron_admin_tenant_name=service

neutron_admin_username=neutron

neutron_admin_password=service_pass

neutron_admin_auth_url=http://10.0.0.11:35357/v2.0

libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver=nova.virt.firewall.NoopFirewallDriver

security_group_api=neutron

Restart the Nova services:

service nova-api restart

service nova-scheduler restart

service nova-conductor restart

Restart the Neutron service:

service neutron-server restart

Installing the OpenStack management UI (Horizon)

apt-get install -y apache2 memcached libapache2-mod-wsgi openstack-dashboard

Edit /etc/openstack-dashboard/local_settings.py:

#ALLOWED_HOSTS = ['horizon.example.com', ]

ALLOWED_HOSTS = ['localhost', '192.168.2.11']

#OPENSTACK_HOST = "127.0.0.1"

OPENSTACK_HOST = "10.0.0.11"

vi /etc/apache2/apache2.conf

Add the following line at the end of the file: ServerName localhost

Restart the Apache service:

service apache2 restart; service memcached restart
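A quick way to confirm the dashboard is being served (assuming curl is available on a machine that can reach the controller) is to request the login page and look for an HTTP 200 or redirect response:

curl -I http://192.168.2.11/horizon/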

The controller node installation is complete at this point.

Network node (network)

Install the basic components:

apt-get install -y vlan bridge-utils

Edit the sysctl configuration:

vim /etc/sysctl.conf

Add the following at the end of the file:

net.ipv4.ip_forward=1

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

Apply the changes:

sysctl -p

Install the Neutron components:

apt-get install -y neutron-plugin-ml2 neutron-plugin-openvswitch-agent haproxy neutron-lbaas-agent dnsmasq neutron-l3-agent neutron-dhcp-agent

Edit the Neutron configuration file (the options below go in the [DEFAULT] section unless a section header is shown):

vim /etc/neutron/neutron.conf

# example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router,lbaas

# the strategy to be used for auth.

# supported values are 'keystone'(default), 'noauth'.

auth_strategy = keystone

allow_overlapping_ips = true

rpc_backend = neutron.openstack.common.rpc.impl_kombu

rabbit_host = 10.0.0.11

[keystone_authtoken]

#auth_host = 127.0.0.1

#auth_port = 35357

#auth_protocol = http

#admin_tenant_name = %service_tenant_name%

#admin_user = %service_user%

#admin_password = %service_password%

#signing_dir = $state_path/keystone-signing

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass

Edit /etc/neutron/l3_agent.ini:

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

use_namespaces = true

Edit /etc/neutron/dhcp_agent.ini:

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq

use_namespaces = true

Edit /etc/neutron/metadata_agent.ini:

auth_url = http://10.0.0.11:5000/v2.0

auth_region = RegionOne

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass

nova_metadata_ip = 10.0.0.11

metadata_proxy_shared_secret = helloopenstack

Log in to the controller node and edit /etc/nova/nova.conf, adding the following in the [DEFAULT] section:

service_neutron_metadata_proxy = true

metadata_proxy_shared_secret = helloopenstack

Restart the nova-api service:

service nova-api restart

Edit /etc/neutron/plugins/ml2/ml2_conf.ini:

[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch

[ml2_type_gre]

tunnel_id_ranges = 1:1000

[ovs]

local_ip = 10.0.1.22

tunnel_type = gre

enable_tunneling = true

[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = true

Edit /etc/neutron/lbaas_agent.ini:

[DEFAULT]

device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

[haproxy]

user_group = nogroup

Restart Open vSwitch:

service openvswitch-switch restart

Create the br-ex bridge:

ovs-vsctl add-br br-ex

ovs-vsctl add-port br-ex eth0
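Note that as soon as eth0 becomes a port of br-ex, the IP address configured on eth0 stops working, so run these commands from the console or over the management interface (eth1, 10.0.0.22). You can verify the bridge and its port with:

ovs-vsctl show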

Edit /etc/network/interfaces:

# this file describes the network interfaces available on your system

# and how to activate them. for more information, see interfaces(5).

# the loopback network interface

auto lo

iface lo inet loopback

# the primary network interface

#auto eth0

#iface eth0 inet static

# address 192.168.2.22

# netmask 255.255.0.0

# network 192.168.0.0

# broadcast 192.168.255.255

# gateway 192.168.1.1

# dns-* options are implemented by the resolvconf package, if installed

# dns-nameservers 192.168.1.1

auto eth0

iface eth0 inet manual

up ifconfig $iface 0.0.0.0 up

up ip link set $iface promisc on

down ip link set $iface promisc off

down ifconfig $iface down

auto br-ex

iface br-ex inet static

address 192.168.2.22

netmask 255.255.0.0

gateway 192.168.1.1

dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

address 10.0.0.22

netmask 255.255.255.0

gateway 10.0.0.1

auto eth2

iface eth2 inet static

address 10.0.1.22

netmask 255.255.255.0
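To bring the new layout up, the simplest option is to reboot the node; alternatively, from the console or an eth1 session, something along these lines usually works (treat it as a sketch rather than a tested one-liner):

ifdown eth0; ifup eth0; ifup br-ex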

Set the environment variables:

cat <<EOF >>/root/creds
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL="http://192.168.2.11:5000/v2.0/"
EOF

source creds

root@controller:~# neutron agent-list

unable to establish connection to http://192.168.2.11:9696/v2.0/agents.json

Cause: the Neutron database had not been synced; syncing it resolves the problem:

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade juno" neutron

root@controller:~# neutron agent-list

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

| id | agent_type | host | alive | admin_state_up | binary |

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

| 08bedacf-5eb4-445e-ba91-ea0d481a5772 | dhcp agent | network | :-) | true | neutron-dhcp-agent |

| 263fa30f-0af9-4534-9153-ea01ffa71874 | loadbalancer agent | network | :-) | true | neutron-lbaas-agent |

| 32a17ac6-50c6-4cfa-8032-8c6f67984251 | l3 agent | network | :-) | true | neutron-l3-agent |

| 3e0d5e0c-41c1-4fe0-9642-05862c0d65ed | open vswitch agent | network | :-) | true | neutron-openvswitch-agent |

| c02625d3-d3df-4bd8-bdfa-a75fff5f2f66 | metadata agent | network | :-) | true | neutron-metadata-agent |

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

The network server configuration is complete.

Compute node

Install the KVM packages:

apt-get install -y kvm libvirt-bin pm-utils
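It is worth checking whether the compute host exposes hardware virtualization; a result of 0 means no VT-x/AMD-V support, which is why this guide later sets virt_type=qemu in /etc/nova/nova-compute.conf:

egrep -c '(vmx|svm)' /proc/cpuinfo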

Install the compute node components:

apt-get install -y nova-compute-kvm python-guestfs

Make the current kernel image readable by non-root users (needed by python-guestfs):

dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-$(uname -r)

Create the script /etc/kernel/postinst.d/statoverride:

#!/bin/sh

version="$1"

# passing the kernel version is required

[ -z "${version}" ] && exit 0

dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-${version}

Make it executable:

chmod +x /etc/kernel/postinst.d/statoverride

Edit the /etc/nova/nova.conf file and add the following:

[DEFAULT]

dhcpbridge_flagfile=/etc/nova/nova.conf

dhcpbridge=/usr/bin/nova-dhcpbridge

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/var/lock/nova

force_dhcp_release=true

iscsi_helper=tgtadm

libvirt_use_virtio_for_bridges=true

connection_type=libvirt

root_helper=nova-rootwrap /etc/nova/rootwrap.conf

verbose=true

ec2_private_dns_show_ip=true

api_paste_config=/etc/nova/api-paste.ini

volumes_path=/var/lib/nova/volumes

enabled_apis=ec2,osapi_compute,metadata

auth_strategy = keystone

rpc_backend = rabbit

rabbit_host = 10.0.0.11

my_ip = 10.0.0.33

vnc_enabled = true

vncserver_listen = 0.0.0.0

vncserver_proxyclient_address = 10.0.0.33

novncproxy_base_url = http://192.168.2.11:6080/vnc_auto.html

glance_host = 10.0.0.11

vif_plugging_is_fatal=false

vif_plugging_timeout=0

[database]

connection = mysql://nova:nova_dbpass@10.0.0.11/nova

[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = nova

admin_password = service_pass

Remove the SQLite database:

rm /var/lib/nova/nova.sqlite

Restart the compute service:

service nova-compute restart

Edit /etc/sysctl.conf and add:

net.ipv4.ip_forward=1

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

Apply immediately:

sysctl -p

Install the networking components:

apt-get install -y neutron-common neutron-plugin-ml2 neutron-plugin-openvswitch-agent

Edit /etc/neutron/neutron.conf (the options below go in [DEFAULT] unless a section header is shown):

#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin

core_plugin = ml2

# service_plugins =

# example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router

auth_strategy = keystone

allow_overlapping_ips = true

rpc_backend = neutron.openstack.common.rpc.impl_kombu

rabbit_host = 10.0.0.11

[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass

Edit /etc/neutron/plugins/ml2/ml2_conf.ini:

[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch

[ml2_type_gre]

tunnel_id_ranges = 1:1000

[ovs]

local_ip = 10.0.1.33

tunnel_type = gre

enable_tunneling = true

[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = true

Restart OVS:

service openvswitch-switch restart

Edit /etc/nova/nova.conf again and add the following in the [DEFAULT] section:

network_api_class = nova.network.neutronv2.api.API

neutron_url = http://10.0.0.11:9696

neutron_auth_strategy = keystone

neutron_admin_tenant_name = service

neutron_admin_username = neutron

neutron_admin_password = service_pass

neutron_admin_auth_url = http://10.0.0.11:35357/v2.0

linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

security_group_api = neutron

Edit /etc/nova/nova-compute.conf and change it to use QEMU:

[DEFAULT]

compute_driver=libvirt.LibvirtDriver

[libvirt]

virt_type=qemu

Restart the relevant services:

service nova-compute restart

service neutron-plugin-openvswitch-agent restart

Verify from the controller:

root@controller:~# nova-manage service list

binary host zone status state updated_at

nova-cert controller internal enabled :-) 2015-03-20 10:29:32

nova-consoleauth controller internal enabled :-) 2015-03-20 10:29:31

nova-conductor controller internal enabled :-) 2015-03-20 10:29:36

nova-scheduler controller internal enabled :-) 2015-03-20 10:29:35

nova-compute compute nova enabled :-) 2015-03-20 10:29:31

At this point all three OpenStack nodes are installed.

You can now log in and use the dashboard:

http://192.168.2.11/horizon/

Username: admin

Password: admin_pass
