diff --git a/.gitignore b/.gitignore index 8fe56ad6ab..ad153f4a07 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,5 @@ stack-screenrc userrc_early AUTHORS ChangeLog +tools/dbcounter/build/ +tools/dbcounter/dbcounter.egg-info/ diff --git a/.zuul.yaml b/.zuul.yaml index ca3e692717..579292b487 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,118 +1,121 @@ -- pragma: - # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to - # be using devstack - # TODO(gtema): delete this once r1 branch is merged into master - implied-branches: - - master - - feature/r1 - - nodeset: - name: openstack-single-node + name: openstack-single-node-jammy nodes: - name: controller - label: ubuntu-xenial + label: ubuntu-jammy groups: - name: tempest nodes: - controller - nodeset: - name: openstack-single-node-focal + name: openstack-single-node-noble nodes: - name: controller - label: ubuntu-focal + label: ubuntu-noble groups: - name: tempest nodes: - controller - nodeset: - name: openstack-single-node-bionic + name: openstack-single-node-focal nodes: - name: controller - label: ubuntu-bionic + label: ubuntu-focal groups: - name: tempest nodes: - controller + +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: openstack-single-node-bionic + nodes: [] + - nodeset: - name: openstack-single-node-xenial + name: devstack-single-node-almalinux-10 nodes: - name: controller - label: ubuntu-xenial + label: almalinux-10-8GB groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-centos-7 + name: devstack-single-node-centos-9-stream nodes: - name: controller - label: centos-7 + label: centos-9-stream groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-centos-9-stream + name: devstack-single-node-centos-10-stream nodes: - name: controller - label: centos-9-stream + label: centos-10-stream-8GB groups: - name: tempest nodes: - controller - nodeset: - name: 
devstack-single-node-opensuse-15 + name: devstack-single-node-debian-trixie nodes: - name: controller - label: opensuse-15 + label: debian-trixie-8GB groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-fedora-latest + name: devstack-single-node-debian-bookworm nodes: - name: controller - label: fedora-35 + label: debian-bookworm groups: - name: tempest nodes: - controller +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: devstack-single-node-opensuse-15 + nodes: [] + - nodeset: - name: devstack-single-node-debian-bullseye + name: devstack-single-node-rockylinux-9 nodes: - name: controller - label: debian-bullseye + label: rockylinux-9 groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-openeuler-20.03-sp2 + name: devstack-single-node-rockylinux-10 nodes: - name: controller - label: openEuler-20-03-LTS-SP2 + label: rockylinux-10-8GB groups: - name: tempest nodes: - controller - nodeset: - name: openstack-two-node + name: openstack-two-node-centos-10-stream nodes: - name: controller - label: ubuntu-xenial + label: centos-10-stream-8GB - name: compute1 - label: ubuntu-xenial + label: centos-10-stream-8GB groups: # Node where tests are executed and test results collected - name: tempest @@ -167,12 +170,12 @@ - compute1 - nodeset: - name: openstack-two-node-focal + name: openstack-two-node-jammy nodes: - name: controller - label: ubuntu-focal + label: ubuntu-jammy - name: compute1 - label: ubuntu-focal + label: ubuntu-jammy groups: # Node where tests are executed and test results collected - name: tempest @@ -197,12 +200,12 @@ - compute1 - nodeset: - name: openstack-two-node-bionic + name: openstack-two-node-noble nodes: - name: controller - label: ubuntu-bionic + label: ubuntu-noble - name: compute1 - label: ubuntu-bionic + label: ubuntu-noble groups: # Node where tests are executed and test results collected - name: tempest @@ -227,12 +230,12 @@ - 
compute1 - nodeset: - name: openstack-two-node-xenial + name: openstack-two-node-focal nodes: - name: controller - label: ubuntu-xenial + label: ubuntu-focal - name: compute1 - label: ubuntu-xenial + label: ubuntu-focal groups: # Node where tests are executed and test results collected - name: tempest @@ -256,6 +259,12 @@ nodes: - compute1 + +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: openstack-two-node-bionic + nodes: [] + - nodeset: name: openstack-three-node-focal nodes: @@ -291,15 +300,48 @@ - compute1 - compute2 +# TODO(frickler): drop this dummy nodeset once all references have been removed - nodeset: name: openstack-three-node-bionic + nodes: [] + +- nodeset: + name: devstack-two-node-debian-bookworm nodes: - name: controller - label: ubuntu-bionic + label: debian-bookworm - name: compute1 - label: ubuntu-bionic - - name: compute2 - label: ubuntu-bionic + label: debian-bookworm + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: devstack-two-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + - name: compute1 + label: debian-trixie-8GB groups: # Node where tests are executed and test results collected - name: tempest @@ -310,12 +352,10 @@ nodes: - controller - compute1 - - compute2 # Nodes that are not the controller - name: subnode nodes: - compute1 - - compute2 # Switch node for multinode networking setup - name: switch nodes: @@ -324,11 +364,10 @@ - name: peers nodes: - compute1 - - compute2 - job: name: devstack-base - parent: multinode + parent: 
openstack-multinode-fips abstract: true description: | Base abstract Devstack job. @@ -342,8 +381,13 @@ nodes (everything but the controller). required-projects: - opendev.org/openstack/devstack + # this is a workaround for a packaging bug in ubuntu + # remove when https://bugs.launchpad.net/nova/+bug/2109592 + # is resolved and oslo.config is not a dep of the novnc deb + # via the defunct python3-novnc package. + - novnc/novnc + roles: - - zuul: opendev.org/openstack/devstack-gate - zuul: opendev.org/openstack/openstack-zuul-jobs vars: devstack_localrc: @@ -360,7 +404,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it by default. LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' @@ -373,10 +416,12 @@ '{{ devstack_conf_dir }}/.localrc.auto': logs '{{ devstack_conf_dir }}/.stackenv': logs '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/atop': logs '{{ devstack_log_dir }}/devstacklog.txt': logs '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs '{{ devstack_log_dir }}/worlddump-latest.txt': logs + '{{ devstack_log_dir }}/qemu.coredump': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs '{{ stage_dir }}/performance.json': logs @@ -388,16 +433,19 @@ /var/log/mysql: logs /var/log/libvirt: logs /etc/libvirt: logs + /etc/lvm: logs /etc/sudoers: logs /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/mount.txt': logs '{{ stage_dir }}/pip2-freeze.txt': logs '{{ stage_dir }}/pip3-freeze.txt': logs '{{ stage_dir }}/dpkg-l.txt': logs '{{ stage_dir }}/rpm-qa.txt': logs '{{ stage_dir }}/core': logs '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/services.txt': logs '{{ stage_dir }}/deprecations.log': logs '{{ stage_dir }}/audit.log': logs /etc/ceph: logs @@ -429,7 +477,6 @@ LOG_COLOR: false 
VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true LIBVIRT_TYPE: qemu devstack_services: @@ -437,7 +484,7 @@ pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml - irrelevant-files: + irrelevant-files: &common-irrelevant-files # Documentation related - ^.*\.rst$ - ^api-ref/.*$ @@ -445,6 +492,12 @@ - ^releasenotes/.*$ # Translations - ^.*/locale/.*po$ + # pre-commit config + - ^\.pre-commit-config\.yaml$ + # gitignore config + - ^\.gitignore$ + # gitreview config + - ^\.gitreview$ - job: name: devstack-minimal @@ -452,7 +505,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-focal + nodeset: openstack-single-node-noble required-projects: - opendev.org/openstack/requirements vars: @@ -466,14 +519,18 @@ dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true + openstack-cli-server: true group-vars: subnode: devstack_services: # Shared services dstat: false memory_tracker: true + file_tracker: true + openstack-cli-server: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -519,8 +576,17 @@ - opendev.org/openstack/nova - opendev.org/openstack/placement - opendev.org/openstack/swift + - opendev.org/openstack/os-test-images timeout: 7200 vars: + # based on observation of the integrated gate + # tempest-integrated-compute was only using ~1.7GB of swap + # when zswap and the host tuning are enabled that increases + # slightly to ~2GB. we are setting the swap size to 8GB to + # be safe and account for more complex scenarios. + # we should revisit this value after some time to see if we + # can reduce it. 
+ configure_swap_size: 8192 devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -529,6 +595,26 @@ DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true OVN_DBS_LOG_LEVEL: dbg + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. 
+ NOVA_LIBVIRT_TB_CACHE_SIZE: 128 + ENABLE_ZSWAP: true devstack_local_conf: post-config: $NEUTRON_CONF: @@ -541,6 +627,7 @@ dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true tls-proxy: true @@ -564,7 +651,7 @@ ovsdb-server: true # Neutron services q-svc: true - q-ovn-metadata-agent: true + q-ovn-agent: true # Swift services s-account: true s-container: true @@ -590,6 +677,7 @@ # Shared services dstat: false memory_tracker: true + file_tracker: true tls-proxy: true # Nova services n-cpu: true @@ -600,7 +688,7 @@ ovs-vswitchd: true ovsdb-server: true # Neutron services - q-ovn-metadata-agent: true + q-ovn-agent: true # Cinder services c-bak: true c-vol: true @@ -619,16 +707,38 @@ Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true ENABLE_CHASSIS_AS_GW: false + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. 
+ NOVA_LIBVIRT_TB_CACHE_SIZE: 128 - job: name: devstack-ipv6 parent: devstack description: | - Devstack single node job for integration gate with IPv6. + Devstack single node job for integration gate with IPv6, + all services and tunnels using IPv6 addresses. vars: devstack_localrc: SERVICE_IP_VERSION: 6 SERVICE_HOST: "" + TUNNEL_IP_VERSION: 6 - job: name: devstack-enforce-scope @@ -642,7 +752,7 @@ - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-focal + nodeset: openstack-two-node-noble description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -651,26 +761,107 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. +- job: + name: devstack-platform-almalinux-purple-lion-ovn-source + parent: tempest-full-py3 + description: AlmaLinux 10 platform test + nodeset: devstack-single-node-almalinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-centos-10-stream + parent: tempest-full-py3 + description: CentOS 10 Stream platform test + nodeset: devstack-single-node-centos-10-stream + timeout: 9000 + voting: false + - job: name: devstack-platform-centos-9-stream parent: tempest-full-py3 description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream + vars: + devstack_localrc: + # TODO(ykarel) Remove this when moving to 10-stream + PYTHON3_VERSION: 3.11 + timeout: 9000 + voting: false + +- job: + name: devstack-platform-debian-trixie + parent: tempest-full-py3 + description: Debian Trixie platform test + nodeset: devstack-single-node-debian-trixie + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: 
+ name: devstack-platform-debian-bookworm + parent: tempest-full-py3 + description: Debian Bookworm platform test + nodeset: devstack-single-node-debian-bookworm timeout: 9000 vars: configure_swap_size: 4096 - job: - name: devstack-platform-debian-bullseye + name: devstack-platform-rocky-blue-onyx parent: tempest-full-py3 - description: Debian Bullseye platform test - nodeset: devstack-single-node-debian-bullseye + description: Rocky Linux 9 Blue Onyx platform test + nodeset: devstack-single-node-rockylinux-9 + timeout: 9000 + # NOTE(danms): This has been failing lately with some repository metadata + # errors. We're marking this as non-voting until it appears to have + # stabilized: + # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0 voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + # TODO(ykarel) Remove this when moving to rocky10 + PYTHON3_VERSION: 3.11 + +- job: + name: devstack-platform-rocky-red-quartz + parent: tempest-full-py3 + description: Rocky Linux Red Quartz platform test + nodeset: devstack-single-node-rockylinux-10 timeout: 9000 + voting: false vars: configure_swap_size: 4096 - # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS - # for the time being. 
+ +- job: + name: devstack-platform-ubuntu-noble-ovn-source + parent: devstack-platform-ubuntu-noble + description: Ubuntu 24.04 LTS (noble) platform test (OVN from source) + voting: false + vars: + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-ubuntu-noble-ovs + parent: tempest-full-py3 + description: Ubuntu 24.04 LTS (noble) platform test (OVS) + nodeset: openstack-single-node-noble + voting: false + timeout: 9000 + vars: + configure_swap_size: 8192 devstack_localrc: Q_AGENT: openvswitch Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch @@ -701,20 +892,6 @@ # Enable Neutron ML2/OVS services q-agt: true -- job: - name: devstack-platform-openEuler-20.03-SP2 - parent: tempest-full-py3 - description: openEuler 20.03 SP2 platform test - nodeset: devstack-single-node-openeuler-20.03-sp2 - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - devstack_localrc: - # NOTE(wxy): OVN package is not supported by openEuler yet. Build it - # from source instead. - OVN_BUILD_FROM_SOURCE: True - - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -726,30 +903,6 @@ devstack_services: tls-proxy: false -- job: - name: devstack-platform-fedora-latest - parent: tempest-full-py3 - description: Fedora latest platform test - nodeset: devstack-single-node-fedora-latest - voting: false - vars: - configure_swap_size: 4096 - # Python 3.10 dependency issues; see - # https://bugs.launchpad.net/horizon/+bug/1960204 - devstack_services: - horizon: false - -- job: - name: devstack-platform-fedora-latest-virt-preview - parent: tempest-full-py3 - description: Fedora latest platform test using the virt-preview repo. 
- nodeset: devstack-single-node-fedora-latest - voting: false - vars: - configure_swap_size: 4096 - devstack_localrc: - ENABLE_FEDORA_VIRT_PREVIEW_REPO: true - - job: name: devstack-tox-base parent: devstack @@ -806,7 +959,7 @@ - job: name: devstack-unit-tests - nodeset: ubuntu-focal + nodeset: ubuntu-noble description: | Runs unit tests on devstack project. @@ -823,135 +976,98 @@ - devstack - devstack-ipv6 - devstack-enforce-scope - - devstack-platform-fedora-latest + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - - devstack-platform-debian-bullseye + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-bios-ipmi-direct-tinyipa + - ironic-tempest-bios-ipmi-autodetect - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-linuxbridge-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovn-tempest-ovs-release: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-multinode-full-py3: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files gate: 
jobs: - devstack - devstack-ipv6 - - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-ubuntu-noble + # NOTE(danms): Disabled due to instability, see comment in the job + # definition above. + # - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - neutron-ovs-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-linuxbridge-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - ironic-tempest-bios-ipmi-direct-tinyipa + irrelevant-files: *common-irrelevant-files + - ironic-tempest-bios-ipmi-autodetect - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. # # * nova-next: maintained by nova for unreleased/undefaulted - # things - # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test - # when neutron-api is served by uwsgi, it's in exprimental for testing. - # the next cycle we can remove this job if things turn out to be - # stable enough. - # * neutron-functional-with-uwsgi: maintained by neutron for functional - # test. Next cycle we can remove this one if things turn out to be - # stable engouh with uwsgi. - # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test. - # Next cycle we can remove this if everything run out stable enough. 
- # * nova-multi-cell: maintained by nova and currently non-voting in the + # things, this job is not experimental but often is used to test + # things that are not yet production ready or to test what will be + # the new default after a deprecation period has ended. + # * nova-multi-cell: maintained by nova and now is voting in the # check queue for nova changes but relies on devstack configuration - # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood - # for Nova to allow early testing of the latest versions of Libvirt and - # QEMU. Should only graduate out of experimental if it ever moves into - # the check queue for Nova. experimental: jobs: - - devstack-platform-openEuler-20.03-SP2 - nova-multi-cell - nova-next - - neutron-fullstack-with-uwsgi - - neutron-functional-with-uwsgi - - neutron-tempest-with-uwsgi - devstack-plugin-ceph-tempest-py3: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-tempest-dvr: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-tempest-dvr-ha-multinode-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - cinder-tempest-lvm-multibackend: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-pg-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - devstack-platform-fedora-latest-virt-preview + irrelevant-files: *common-irrelevant-files - devstack-no-tls-proxy periodic: jobs: - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs diff --git a/README.rst b/README.rst index 
f3a585a926..86b85da956 100644 --- a/README.rst +++ b/README.rst @@ -4,7 +4,7 @@ from git source trees. Goals ===== -* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora +* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) @@ -28,9 +28,9 @@ Versions The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the -following to create a Pike OpenStack cloud:: +following to create a Zed OpenStack cloud:: - git checkout stable/pike + git checkout stable/zed ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate @@ -55,7 +55,7 @@ When the script finishes executing, you should be able to access OpenStack endpoints, like so: * Horizon: http://myhost/ -* Keystone: http://myhost/identity/v2.0/ +* Keystone: http://myhost/identity/v3/ We also provide an environment file that you can use to interact with your cloud via CLI:: diff --git a/clean.sh b/clean.sh index 870dfd4313..6dbcb053bc 100755 --- a/clean.sh +++ b/clean.sh @@ -40,7 +40,7 @@ source $TOP_DIR/lib/rpc_backend source $TOP_DIR/lib/tls -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -50,7 +50,6 @@ source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy set -o xtrace @@ -103,7 +102,7 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th fi # Clean out /etc -sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/neutron /etc/openstack/ +sudo rm -rf $KEYSTONE_CONF_DIR $GLANCE_CONF_DIR $NOVA_CONF_DIR $PLACEMENT_CONF_DIR 
$CINDER_CONF_DIR $SWIFT_CONF_DIR $NEUTRON_CONF_DIR $OPENSTACKCLIENT_CONF_DIR # Clean out tgt sudo rm -f /etc/tgt/conf.d/* diff --git a/doc/requirements.txt b/doc/requirements.txt index ffce3ff74c..7980b93ed7 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -4,8 +4,4 @@ Pygments docutils sphinx>=2.0.0,!=2.1.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 -nwdiag -blockdiag -sphinxcontrib-blockdiag -sphinxcontrib-nwdiag zuul-sphinx>=0.2.0 diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png new file mode 100644 index 0000000000..7730ca93f1 Binary files /dev/null and b/doc/source/assets/images/neutron-network-1.png differ diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png new file mode 100644 index 0000000000..919935119d Binary files /dev/null and b/doc/source/assets/images/neutron-network-2.png differ diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png new file mode 100644 index 0000000000..34f03ed5c9 Binary files /dev/null and b/doc/source/assets/images/neutron-network-3.png differ diff --git a/doc/source/conf.py b/doc/source/conf.py index 2e17da17f8..bb0357286a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -23,14 +23,14 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ 'sphinx.ext.autodoc', - 'zuul_sphinx', - 'openstackdocstheme', - 'sphinxcontrib.blockdiag', - 'sphinxcontrib.nwdiag' ] +extensions = [ + 'sphinx.ext.autodoc', + 'zuul_sphinx', + 'openstackdocstheme', +] # openstackdocstheme options -openstackdocs_repo_name = 'openstack-dev/devstack' +openstackdocs_repo_name = 'openstack/devstack' openstackdocs_pdf_link = True openstackdocs_bug_project = 'devstack' openstackdocs_bug_tag = '' diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 40a8725b8d..3cfba716ca 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -181,6 +181,9 @@ values that most often need to be set. If the ``*_PASSWORD`` variables are not set here you will be prompted to enter values for them by ``stack.sh``. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + The network ranges must not overlap with any networks in use on the host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly used for both the local networking and Nova's fixed and floating ranges. @@ -320,7 +323,7 @@ a file, keep service logs and disable color in the stored files. [[local|localrc]] DEST=/opt/stack/ - LOGFILE=$LOGDIR/stack.sh.log + LOGFILE=$DEST/stack.sh.log LOG_COLOR=False Database Backend @@ -348,30 +351,21 @@ Example disabling RabbitMQ in ``local.conf``:: disable_service rabbit - Apache Frontend --------------- -The Apache web server can be enabled for wsgi services that support -being deployed under HTTPD + mod_wsgi. By default, services that -recommend running under HTTPD + mod_wsgi are deployed under Apache. To -use an alternative deployment strategy (e.g. eventlet) for services -that support an alternative to HTTPD + mod_wsgi set -``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your -``local.conf``. 
- -Each service that can be run under HTTPD + mod_wsgi also has an -override toggle available that can be set in your ``local.conf``. - -Keystone is run under Apache with ``mod_wsgi`` by default. - -Example (Keystone):: - - KEYSTONE_USE_MOD_WSGI="True" +The Apache web server is enabled for services that support being deployed via +WSGI. Today this +means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This +historical legacy is captured by the naming of many variables, which include +``MOD_WSGI`` rather than ``UWSGI``. -Example (Nova):: - - NOVA_USE_MOD_WSGI="True" +Some services support alternative deployment strategies (e.g. eventlet). You +can enable these by setting ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your +``local.conf``. In addition, each service that can be run under HTTPD + +mod_wsgi also has an override toggle available that can be set in your +``local.conf``. These are, however, slowly being removed as services have +adopted standardized deployment mechanisms and more generally moved away from +eventlet. Example (Swift):: @@ -381,11 +375,6 @@ Example (Heat):: HEAT_USE_MOD_WSGI="True" -Example (Cinder):: - - CINDER_USE_MOD_WSGI="True" - - Libraries from Git ------------------ @@ -521,8 +510,8 @@ behavior: can be configured with any valid IPv6 prefix. The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. -Service Version -~~~~~~~~~~~~~~~ +Service IP Version +~~~~~~~~~~~~~~~~~~ DevStack can enable service operation over either IPv4 or IPv6 by setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or @@ -542,6 +531,27 @@ optionally be used to alter the default IPv6 address:: HOST_IPV6=${some_local_ipv6_address} +Tunnel IP Version +~~~~~~~~~~~~~~~~~ + +DevStack can enable tunnel operation over either IPv4 or IPv6 by +setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or +``TUNNEL_IP_VERSION=6`` respectively. 
+ +When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints, +for example, ``HOST_IP``. + +When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints, +for example, ``HOST_IPV6``. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not supported, as this value must match the address +family of the local tunnel endpoint IP(v6) address. + +The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the +setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP`` +when set to ``4``, and ``HOST_IPV6`` when set to ``6``. + Multi-node setup ~~~~~~~~~~~~~~~~ @@ -615,7 +625,7 @@ tests can be run as follows: :: $ cd /opt/stack/tempest - $ tox -efull tempest.scenario.test_network_basic_ops + $ tox -e smoke By default tempest is downloaded and the config file is generated, but the tempest package is not installed in the system's global site-packages (the @@ -648,6 +658,35 @@ adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for each is 10.) +DevStack's Cinder LVM configuration module currently supports both iSCSI and +NVMe connections, and we can choose which one to use with options +``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``, +and ``CINDER_TARGET_PORT``. 
+ +Defaults use iSCSI with the LIO target manager:: + + CINDER_TARGET_HELPER="lioadm" + CINDER_TARGET_PROTOCOL="iscsi" + CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:" + CINDER_TARGET_PORT=3260 + +Additionally there are 3 supported transport protocols for NVMe, +``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target +is selected the protocol, prefix, and port defaults will change to more +sensible defaults for NVMe:: + + CINDER_TARGET_HELPER="nvmet" + CINDER_TARGET_PROTOCOL="nvmet_rdma" + CINDER_TARGET_PREFIX="nvme-subsystem-1" + CINDER_TARGET_PORT=4420 + +When selecting the RDMA transport protocol DevStack will create on Cinder nodes +a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined +then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``. + +This Soft-RoCE device will always be created on the Nova compute side since we +cannot tell beforehand whether there will be an RDMA connection or not. + Keystone ~~~~~~~~ @@ -698,7 +737,7 @@ or at runtime via: :: - openstack --os-cloud devstack-system-admin registered limit update \ + openstack --os-cloud devstack-system-admin registered limit set \ --service glance --default-limit 5000 --region RegionOne image_size_total .. _arch-configuration: diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 4de238fbf8..8b5a85b3df 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -42,8 +42,9 @@ Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ All changes proposed to the Devstack require two ``Code-Review +2`` votes from Devstack core reviewers before one of the core reviewers can approve the patch -by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate -which can be approved by single core reviewers. +by giving ``Workflow +1`` vote. 
There are 2 exceptions, approving patches to +unblock the gate and patches that do not relate to the Devstack's core logic, +like for example old job cleanups, can be approved by single core reviewers. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst index fd0d9cdf74..3ca0ad94b4 100644 --- a/doc/source/debugging.rst +++ b/doc/source/debugging.rst @@ -20,6 +20,12 @@ provides consumption output when available memory is seen to be falling (i.e. processes are consuming memory). It also provides output showing locked (unswappable) memory. +file_tracker +------------ + +The ``file_tracker`` service periodically monitors the number of +open files in the system. + tcpdump ------- diff --git a/doc/source/guides.rst b/doc/source/guides.rst index e7ec629962..e7b46b6e55 100644 --- a/doc/source/guides.rst +++ b/doc/source/guides.rst @@ -20,7 +20,7 @@ Walk through various setups used by stackers guides/neutron guides/devstack-with-nested-kvm guides/nova - guides/devstack-with-lbaas-v2 + guides/devstack-with-octavia guides/devstack-with-ldap All-In-One Single VM @@ -69,10 +69,10 @@ Nova and devstack Guide to working with nova features :doc:`Nova and devstack `. -Configure Load-Balancer Version 2 ------------------------------------ +Configure Octavia +----------------- -Guide on :doc:`Configure Load-Balancer Version 2 `. +Guide on :doc:`Configure Octavia `. Deploying DevStack with LDAP ---------------------------- diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst deleted file mode 100644 index 5d96ca7d74..0000000000 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ /dev/null @@ -1,145 +0,0 @@ -Devstack with Octavia Load Balancing -==================================== - -Starting with the OpenStack Pike release, Octavia is now a standalone service -providing load balancing services for OpenStack. 
- -This guide will show you how to create a devstack with `Octavia API`_ enabled. - -.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html - -Phase 1: Create DevStack + 2 nova instances --------------------------------------------- - -First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, -make sure it is updated. Install git and any other developer tools you find -useful. - -Install devstack - -:: - - git clone https://opendev.org/openstack/devstack - cd devstack/tools - sudo ./create-stack-user.sh - cd ../.. - sudo mv devstack /opt/stack - sudo chown -R stack.stack /opt/stack/devstack - -This will clone the current devstack code locally, then setup the "stack" -account that devstack services will run under. Finally, it will move devstack -into its default location in /opt/stack/devstack. - -Edit your ``/opt/stack/devstack/local.conf`` to look like - -:: - - [[local|localrc]] - enable_plugin octavia https://opendev.org/openstack/octavia - # If you are enabling horizon, include the octavia dashboard - # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git - # If you are enabling barbican for TLS offload in Octavia, include it here. 
- # enable_plugin barbican https://opendev.org/openstack/barbican - - # ===== BEGIN localrc ===== - DATABASE_PASSWORD=password - ADMIN_PASSWORD=password - SERVICE_PASSWORD=password - SERVICE_TOKEN=password - RABBIT_PASSWORD=password - # Enable Logging - LOGFILE=$DEST/logs/stack.sh.log - VERBOSE=True - LOG_COLOR=True - # Pre-requisite - ENABLED_SERVICES=rabbit,mysql,key - # Horizon - enable for the OpenStack web GUI - # ENABLED_SERVICES+=,horizon - # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy - ENABLED_SERVICES+=,placement-api,placement-client - # Glance - ENABLED_SERVICES+=,g-api - # Neutron - ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron - ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api - # Cinder - ENABLED_SERVICES+=,c-api,c-vol,c-sch - # Tempest - ENABLED_SERVICES+=,tempest - # Barbican - Optionally used for TLS offload in Octavia - # ENABLED_SERVICES+=,barbican - # ===== END localrc ===== - -Run stack.sh and do some sanity checks - -:: - - sudo su - stack - cd /opt/stack/devstack - ./stack.sh - . ./openrc - - openstack network list # should show public and private networks - -Create two nova instances that we can use as test http servers: - -:: - - #create nova instances on private network - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 - openstack server list # should show the nova instances just created - - #add secgroup rules to allow ssh etc.. 
- openstack security group rule create default --protocol icmp - openstack security group rule create default --protocol tcp --dst-port 22:22 - openstack security group rule create default --protocol tcp --dst-port 80:80 - -Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run - -:: - - MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') - while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& - -Phase 2: Create your load balancer ----------------------------------- - -Make sure you have the 'openstack loadbalancer' commands: - -:: - - pip install python-octaviaclient - -Create your load balancer: - -:: - - openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - -Please note: The fields are the IP addresses of the nova -servers created in Phase 1. -Also note, using the API directly you can do all of the above commands in one -API call. 
- -Phase 3: Test your load balancer --------------------------------- - -:: - - openstack loadbalancer show lb1 # Note the vip_address - curl http:// - curl http:// - -This should show the "Welcome to " message from each member server. diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 3732f06fd8..ba483e9ec9 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -1,3 +1,5 @@ +.. _kvm_nested_virt: + ======================================================= Configure DevStack with KVM-based Nested Virtualization ======================================================= diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. 
+ +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. + # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . 
./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. 
+ +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. 
_Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 81c5945307..ef339f1f5c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -24,7 +24,7 @@ Install a couple of packages to bootstrap configuration: :: - apt-get install -y git sudo || yum install -y git sudo + apt-get install -y git sudo || dnf install -y git sudo Network Configuration --------------------- @@ -75,13 +75,21 @@ Otherwise create the stack user: useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +:: + + chmod +x /opt/stack + This user will be making many changes to your system during installation and operation so it needs to have sudo privileges to root without a password: :: - echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack From here on use the ``stack`` user. **Logout** and **login** as the ``stack`` user. @@ -202,6 +210,48 @@ only needs to be performed for subnodes. .. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html +Configure Tempest Node to run the Tempest tests +----------------------------------------------- + +If there is a need to execute Tempest tests against different Cluster +Controller node then it can be done by re-using the ``local.conf`` file from +the Cluster Controller node but with not enabled Controller services in +``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest`` +as a configured service. 
Then variable ``SERVICES_FOR_TEMPEST`` must be +configured to contain those services that were enabled on the Cluster +Controller node in the ``ENABLED_SERVICES`` variable. For example the +``local.conf`` file could look as follows: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + ENABLED_SERVICES=tempest + SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance + +Then just execute the devstack: + +:: + + ./stack.sh + + Cleaning Up After DevStack -------------------------- diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 2c25a1c350..a7adeeff73 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -41,19 +41,8 @@ network and is on a shared subnet with other machines. The `local.conf` exhibited here assumes that 1500 is a reasonable MTU to use on that network. -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network hardware_network { - address = "172.18.161.0/24" - router [ address = "172.18.161.1" ]; - devstack-1 [ address = "172.18.161.6" ]; - } - } +.. image:: /assets/images/neutron-network-1.png + :alt: Network configuration for a single DevStack node DevStack Configuration @@ -100,21 +89,8 @@ also want to do multinode testing and networking. Physical Network Setup ~~~~~~~~~~~~~~~~~~~~~~ -.. 
nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network hardware_network { - address = "172.18.161.0/24" - router [ address = "172.18.161.1" ]; - devstack-1 [ address = "172.18.161.6" ]; - devstack-2 [ address = "172.18.161.7" ]; - } - } - +.. image:: /assets/images/neutron-network-2.png + :alt: Network configuration for multiple DevStack nodes After DevStack installs and configures Neutron, traffic from guest VMs flows out of `devstack-2` (the compute node) and is encapsulated in a @@ -222,8 +198,6 @@ connect OpenStack nodes (like `devstack-2`) together. This bridge is used so that project network traffic, using the VXLAN tunneling protocol, flows between each compute node where project instances run. - - DevStack Compute Configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -268,30 +242,8 @@ to the neutron L3 service. Physical Network Setup ---------------------- -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network provider_net { - address = "203.0.113.0/24" - router [ address = "203.0.113.1" ]; - controller; - compute1; - compute2; - } - - network control_plane { - router [ address = "10.0.0.1" ] - address = "10.0.0.0/24" - controller [ address = "10.0.0.2" ] - compute1 [ address = "10.0.0.3" ] - compute2 [ address = "10.0.0.4" ] - } - } - +.. image:: /assets/images/neutron-network-3.png + :alt: Network configuration for provider networks On a compute node, the first interface, eth0 is used for the OpenStack management (API, message bus, etc) as well as for ssh for an @@ -499,44 +451,6 @@ by default. If you want to remove all the extension drivers (even 'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank. -Using Linux Bridge instead of Open vSwitch ------------------------------------------- - -The configuration for using the Linux Bridge ML2 driver is fairly -straight forward. 
The Linux Bridge configuration for DevStack is similar -to the :ref:`Open vSwitch based single interface ` -setup, with small modifications for the interface mappings. - - -:: - - [[local|localrc]] - HOST_IP=172.18.161.6 - SERVICE_HOST=172.18.161.6 - MYSQL_HOST=172.18.161.6 - RABBIT_HOST=172.18.161.6 - GLANCE_HOSTPORT=172.18.161.6:9292 - ADMIN_PASSWORD=secret - DATABASE_PASSWORD=secret - RABBIT_PASSWORD=secret - SERVICE_PASSWORD=secret - - ## Neutron options - Q_USE_SECGROUP=True - FLOATING_RANGE="172.18.161.0/24" - IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24" - Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 - PUBLIC_NETWORK_GATEWAY="172.18.161.1" - PUBLIC_INTERFACE=eth0 - - Q_USE_PROVIDERNET_FOR_PUBLIC=True - - # Linuxbridge Settings - Q_AGENT=linuxbridge - LB_PHYSICAL_INTERFACE=eth0 - PUBLIC_PHYSICAL_NETWORK=default - LB_INTERFACE_MAPPINGS=default:eth0 - Using MacVTap instead of Open vSwitch ------------------------------------------ @@ -604,7 +518,7 @@ the MacVTap mechanism driver: [[local|localrc]] ... - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap ... For the MacVTap compute node, use this local.conf: diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 5b427972c4..6b8aabf8db 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + --image cirros-0.6.3-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. 
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index a0e97edb37..263fbb9d6f 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -49,13 +49,21 @@ below) $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it will need to have sudo privileges: .. code-block:: console - $ apt-get install sudo -y || yum install -y sudo - $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + $ apt-get install sudo -y || dnf install -y sudo + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack .. note:: On some systems you may need to use ``sudo visudo``. @@ -73,7 +81,7 @@ We'll grab the latest version of DevStack via https: .. code-block:: console - $ sudo apt-get install git -y || sudo yum install -y git + $ sudo apt-get install git -y || sudo dnf install -y git $ git clone https://opendev.org/openstack/devstack $ cd devstack @@ -98,6 +106,9 @@ do the following: - Set the service password. This is used by the OpenStack services (Nova, Glance, etc) to authenticate with Keystone. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + ``local.conf`` should look something like this: .. code-block:: ini diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 7dac18b333..4272a4b180 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -56,8 +56,8 @@ passed as the user-data file when booting the VM. 
write_files: - content: | #!/bin/sh - DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo yum update -qy - DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git + DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy + DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git sudo chown stack:stack /home/stack cd /home/stack git clone https://opendev.org/openstack/devstack diff --git a/doc/source/index.rst b/doc/source/index.rst index feb50ce4e9..a07bb84922 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -37,10 +37,10 @@ Install Linux ------------- Start with a clean and minimal install of a Linux system. DevStack -attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler. +attempts to support the two latest LTS releases of Ubuntu, +Rocky Linux 9 and openEuler. -If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the +If you do not have a preference, Ubuntu 24.04 (Noble) is the most tested, and will probably go the smoothest. Add Stack User (optional) @@ -57,6 +57,14 @@ to run DevStack with $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it should have sudo privileges: @@ -93,7 +101,10 @@ devstack git repo. This is the minimum required config to get started with DevStack. .. note:: There is a sample :download:`local.conf ` file - under the *samples* directory in the devstack repository. + under the *samples* directory in the devstack repository. + +.. 
warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. Start the install ----------------- @@ -102,7 +113,7 @@ Start the install $ ./stack.sh -This will take a 15 - 20 minutes, largely depending on the speed of +This will take 15 - 30 minutes, largely depending on the speed of your internet connection. Many git trees and packages will be installed during this process. @@ -122,6 +133,8 @@ there. You can ``source openrc`` in your shell, and then use the ``openstack`` command line tool to manage your devstack. +You can :ref:`create a VM and SSH into it `. + You can ``cd /opt/stack/tempest`` and run tempest tests that have been configured to work with your devstack. diff --git a/doc/source/networking.rst b/doc/source/networking.rst index e65c7ef195..10e1c3ff2c 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -68,7 +68,7 @@ Shared Guest Interface .. warning:: This is not a recommended configuration. Because of interactions - between ovs and bridging, if you reboot your box with active + between OVS and bridging, if you reboot your box with active networking you may lose network connectivity to your system. If you need your guests accessible on the network, but only have 1 @@ -114,3 +114,125 @@ For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of ``FIXED_RANGE_V6`` will just use the value of that directly. ``SUBNETPOOL_PREFIX_V6`` will just default to the value of ``IPV6_ADDRS_SAFE_TO_USE`` directly. + +.. _ssh: + +SSH access to instances +======================= + +To validate connectivity, you can create an instance using the +``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP +using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach +this floating IP to the instance: + +.. 
code-block:: shell + + openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair + openstack server create --network private --key-name test-keypair ... test-server + fip_id=$(openstack floating ip create public -f value -c id) + openstack server add floating ip test-server ${fip_id} + +Once done, ensure you have enabled SSH and ICMP (ping) access for the security +group used for the instance. You can either create a custom security group and +specify it when creating the instance or add it after creation, or you can +modify the ``default`` security group created by default for each project. +Let's do the latter: + +.. code-block:: shell + + openstack security group rule create --proto icmp --dst-port 0 default + openstack security group rule create --proto tcp --dst-port 22 default + +Finally, SSH into the instance. If you used the Cirros instance uploaded by +default, then you can run the following: + +.. code-block:: shell + + openstack server ssh test-server -- -l cirros + +This will connect using the ``cirros`` user and the keypair you configured when +creating the instance. + +Remote SSH access to instances +============================== + +You can also SSH to created instances on your DevStack host from other hosts. +This can be helpful if you are e.g. deploying DevStack in a VM on an existing +cloud and wish to do development on your local machine. There are a few ways to +do this. + +.. rubric:: Configure instances to be locally accessible + +The most obvious way is to configure guests to be locally accessible, as +described `above `__. This has the advantage of +requiring no further effort on the client. However, it is more involved and +requires either support from your cloud or some inadvisable workarounds. + +.. rubric:: Use your DevStack host as a jump host + +You can choose to use your DevStack host as a jump host. To SSH to a instance +this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh`` +command. 
For example: + +.. code-block:: + + openstack server ssh test-server -- -l cirros -J username@devstack-host + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +This can also be configured via your ``~/.ssh/config`` file, making it rather +effortless. However, it only allows SSH access. If you want to access e.g. a +web application on the instance, you will need to configure an SSH tunnel and +forward select ports using the ``-L`` option. For example, to forward HTTP +traffic: + +.. code-block:: + + openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80 + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +As you can imagine, this can quickly get out of hand, particularly for more +complex guest applications with multiple ports. + +.. rubric:: Use a proxy or VPN tool + +You can use a proxy or VPN tool to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many +such tools available to do this. For example, we could use a useful utility +called `shuttle`__. To enable tunneling using ``shuttle``, first ensure you +have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S) +traffic is necessary so you can use the OpenStack APIs remotely. How you do +this will depend on where your DevStack host is running. Once this is done, +install ``sshuttle`` on your localhost: + +.. code-block:: bash + + sudo apt-get install sshuttle || dnf install sshuttle + +Finally, start ``sshuttle`` on your localhost using the floating IP address +range. For example, assuming you are using the default value for +``$FLOATING_RANGE``, you can do: + +.. 
code-block:: bash + + sshuttle -r username@devstack-host 172.24.4.0/24 + +(where ``username`` and ``devstack-host`` are the username and hostname of your +DevStack host). + +You should now be able to create an instance and SSH into it: + +.. code-block:: bash + + openstack server ssh test-server -- -l cirros + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `) + +.. __: https://github.com/sshuttle/sshuttle diff --git a/doc/source/overview.rst b/doc/source/overview.rst index a609333289..c978e8d2cf 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -23,13 +23,12 @@ strategy to include the latest Ubuntu release and the latest RHEL release.* - Ubuntu: current LTS release plus current development release -- Fedora: current release plus previous release -- RHEL/CentOS: current major release +- RHEL/CentOS/RockyLinux: current major release - Other OS platforms may continue to be included but the maintenance of those platforms shall not be assumed simply due to their presence. Having a listed point-of-contact for each additional OS will greatly increase its chance of being well-maintained. -- Patches for Ubuntu and/or Fedora will not be held up due to +- Patches for Ubuntu and/or RockyLinux will not be held up due to side-effects on other OS platforms. Databases @@ -53,12 +52,6 @@ Web Server - Apache -OpenStack Network ------------------ - -- Neutron: A basic configuration approximating the original FlatDHCP - mode using linuxbridge or OpenVSwitch. - Services -------- diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2e8e8f53d7..560668f2a0 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,36 +24,34 @@ official OpenStack projects. 
======================================== === Plugin Name URL ======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ -openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm `__ -openstack/cinderlib `https://opendev.org/openstack/cinderlib `__ openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ -openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard 
`https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ -openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ openstack/keystone `https://opendev.org/openstack/keystone `__ -openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ -openstack/kuryr-tempest-plugin `https://opendev.org/openstack/kuryr-tempest-plugin `__ openstack/magnum `https://opendev.org/openstack/magnum `__ openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ openstack/manila `https://opendev.org/openstack/manila `__ @@ -61,17 +59,10 @@ openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-t openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ openstack/masakari `https://opendev.org/openstack/masakari `__ openstack/mistral `https://opendev.org/openstack/mistral `__ -openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ -openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ -openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ -openstack/murano `https://opendev.org/openstack/murano `__ openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ -openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ -openstack/networking-powervm 
`https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ @@ -80,22 +71,16 @@ openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron- openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ -openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ +openstack/nova `https://opendev.org/openstack/nova `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ -openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ -openstack/patrole `https://opendev.org/openstack/patrole `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ -openstack/sahara `https://opendev.org/openstack/sahara `__ -openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ -openstack/senlin `https://opendev.org/openstack/senlin `__ -openstack/shade `https://opendev.org/openstack/shade `__ openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ -openstack/solum `https://opendev.org/openstack/solum `__ openstack/storlets `https://opendev.org/openstack/storlets `__ 
openstack/tacker `https://opendev.org/openstack/tacker `__ openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ @@ -182,12 +167,12 @@ x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nov x/scalpels `https://opendev.org/x/scalpels `__ x/slogging `https://opendev.org/x/slogging `__ x/stackube `https://opendev.org/x/stackube `__ -x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ x/trio2o `https://opendev.org/x/trio2o `__ x/valet `https://opendev.org/x/valet `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__ ======================================== === diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 7d70d74dd0..fe567e2277 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -232,20 +232,17 @@ an early phase of its execution. These packages may be defined in a plugin as files that contain new-line separated lists of packages required by the plugin -Supported packaging systems include apt and yum across multiple +Supported packaging systems include apt and dnf across multiple distributions. To enable a plugin to hook into this and install package dependencies, packages may be listed at the following locations in the top-level of the plugin repository: - ``./devstack/files/debs/$plugin_name`` - Packages to install when running - on Ubuntu, Debian or Linux Mint. + on Ubuntu or Debian. - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running on Red Hat, Fedora, or CentOS. -- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when - running on SUSE Linux or openSUSE. - Although there a no plans to remove this method of installing packages, plugins should consider it deprecated for ``bindep`` support described below. 
diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst new file mode 100644 index 0000000000..65dd5b16b2 --- /dev/null +++ b/doc/source/tempest.rst @@ -0,0 +1,25 @@ +======= +Tempest +======= + +`Tempest`_ is the OpenStack Integration test suite. It is installed by default +and is used to provide integration testing for many of the OpenStack services. +Just like DevStack itself, it is possible to extend Tempest with plugins. In +fact, many Tempest plugin packages also include DevStack plugin to do things +like pre-create required static resources. + +The `Tempest documentation `_ provides a thorough guide to using +Tempest. However, if you simply wish to run the standard set of Tempest tests +against an existing deployment, you can do the following: + +.. code-block:: shell + + cd /opt/stack/tempest + /opt/stack/data/venv/bin/tempest run ... + +The above assumes you have installed DevStack in the default location +(configured via the ``DEST`` configuration variable) and have enabled +virtualenv-based installation in the standard location (configured via the +``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively). + +.. 
_Tempest: https://docs.openstack.org/tempest/latest/ diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template index e1246f11b6..e401803abc 100644 --- a/files/apache-cinder-api.template +++ b/files/apache-cinder-api.template @@ -6,21 +6,13 @@ Listen %PUBLICPORT% WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/c-api.log %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - + Require all granted diff --git a/files/apache-horizon.template b/files/apache-horizon.template index efcfc0360b..c6c55ecf27 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -21,22 +21,13 @@ Options Indexes FollowSymLinks MultiViews AllowOverride None - # Apache 2.4 uses mod_authz_host for access control now (instead of - # "Allow") - - Order allow,deny - Allow from all - - = 2.4> - Require all granted - + Require all granted - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +%WSGIPYTHONHOME% WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 1a353e5f4a..d99e8e6ce0 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -23,6 +23,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public diff --git a/files/apache-neutron.template b/files/apache-neutron.template index c7796b93bf..358e87f5da 100644 --- a/files/apache-neutron.template +++ b/files/apache-neutron.template @@ -24,6 +24,7 
@@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /networking %NEUTRON_BIN%/neutron-api diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index bcf406edf3..66fcf73cf2 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-api.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template index 6231c1ced8..64be03166e 100644 --- a/files/apache-nova-metadata.template +++ b/files/apache-nova-metadata.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template deleted file mode 100644 index 011abb95fc..0000000000 --- a/files/apache-placement-api.template +++ /dev/null @@ -1,27 +0,0 @@ -# NOTE(sbauza): This virtualhost is only here because some directives can -# only be set by a virtualhost or server context, so that's why the port is not bound. -# TODO(sbauza): Find a better way to identify a free port that is not corresponding to an existing -# vhost. 
- - WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup placement-api - WSGIScriptAlias / %PUBLICWSGI% - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/placement-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /placement %PUBLICWSGI% - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/files/debs/general b/files/debs/general index 364f3cc6e2..0cddcf05f4 100644 --- a/files/debs/general +++ b/files/debs/general @@ -11,10 +11,8 @@ gettext # used for compiling message catalogs git graphviz # needed for docs iputils-ping -libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL libsystemd-dev # for systemd-python @@ -35,5 +33,4 @@ tcpdump unzip uuid-runtime wget -wget zlib1g-dev diff --git a/files/debs/nova b/files/debs/nova index 0194f00f2c..5c00ad72d9 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -1,7 +1,5 @@ conntrack curl -dnsmasq-base -dnsmasq-utils # for dhcp_release ebtables genisoimage # required for config_drive iptables diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf deleted file mode 100644 index 66a375190e..0000000000 --- a/files/dnsmasq-for-baremetal-from-nova-network.conf +++ /dev/null @@ -1,3 +0,0 @@ -enable-tftp -tftp-root=/tftpboot -dhcp-boot=pxelinux.0 diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..47fbfc5e17 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + 
cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. + send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + for 
name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal deleted file mode 100644 index 61f73eeae3..0000000000 --- a/files/rpms-suse/baremetal +++ /dev/null @@ -1 +0,0 @@ -dnsmasq diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph deleted file mode 100644 index 8c4955df90..0000000000 --- a/files/rpms-suse/ceph +++ /dev/null @@ -1,3 +0,0 @@ -ceph # NOPRIME -lsb -xfsprogs diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder deleted file mode 100644 index b39cc79a27..0000000000 --- a/files/rpms-suse/cinder +++ /dev/null @@ -1,3 +0,0 @@ -lvm2 -qemu-tools -tgt # NOPRIME diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/rpms-suse/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/rpms-suse/general b/files/rpms-suse/general deleted file mode 100644 index f63611025c..0000000000 --- a/files/rpms-suse/general +++ /dev/null @@ -1,34 +0,0 @@ -apache2 -apache2-devel -bc -ca-certificates-mozilla -curl -gawk -gcc -gcc-c++ -git-core -graphviz # docs -iputils -libffi-devel # pyOpenSSL -libjpeg8-devel # Pillow 3.0.0 -libopenssl-devel # to rebuild pyOpenSSL if needed -libxslt-devel # lxml -lsof # useful when debugging -make -net-tools -openssh -openssl -pcre-devel # python-pcre -postgresql-devel # psycopg2 -psmisc -python3-systemd -python-cmd2 # dist:opensuse-12.3 -python-devel # pyOpenSSL -python-xml -tar -tcpdump -unzip -util-linux -wget -which -zlib-devel diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon deleted file mode 100644 index 753ea76e04..0000000000 --- a/files/rpms-suse/horizon +++ /dev/null @@ -1,2 +0,0 @@ -apache2-mod_wsgi # NOPRIME -apache2 # 
NOPRIME diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone deleted file mode 100644 index 66cfc23423..0000000000 --- a/files/rpms-suse/keystone +++ /dev/null @@ -1,4 +0,0 @@ -cyrus-sasl-devel -memcached -openldap2-devel -sqlite3 diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap deleted file mode 100644 index 46d26f0796..0000000000 --- a/files/rpms-suse/ldap +++ /dev/null @@ -1,3 +0,0 @@ -openldap2 -openldap2-client -python-ldap diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api deleted file mode 100644 index 0f08daace3..0000000000 --- a/files/rpms-suse/n-api +++ /dev/null @@ -1 +0,0 @@ -python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu deleted file mode 100644 index 9c724cb9d8..0000000000 --- a/files/rpms-suse/n-cpu +++ /dev/null @@ -1,10 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -cryptsetup -dosfstools -libosinfo -lvm2 -mkisofs # not:sle12 -open-iscsi -sg3_utils -# Stuff for diablo volumes -sysfsutils diff --git a/files/rpms-suse/neutron-agent b/files/rpms-suse/neutron-agent deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms-suse/neutron-agent +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common deleted file mode 100644 index e3799a9353..0000000000 --- a/files/rpms-suse/neutron-common +++ /dev/null @@ -1,12 +0,0 @@ -acl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -haproxy # to serve as metadata proxy inside router/dhcp namespaces -iptables -iputils -rabbitmq-server # NOPRIME -radvd # NOPRIME -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/neutron-l3 b/files/rpms-suse/neutron-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms-suse/neutron-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova deleted file mode 100644 index 1cc2f62ea5..0000000000 --- a/files/rpms-suse/nova +++ /dev/null @@ -1,23 +0,0 @@ 
-cdrkit-cdrtools-compat # dist:sle12 -conntrack-tools -curl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -iptables -iputils -kpartx -kvm # NOPRIME -libvirt # NOPRIME -libvirt-python # NOPRIME -# mkisofs is required for config_drive -mkisofs # not:sle12 -parted -polkit -# qemu as fallback if kvm cannot be used -qemu # NOPRIME -rabbitmq-server # NOPRIME -socat -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch deleted file mode 100644 index 53f8bb22cf..0000000000 --- a/files/rpms-suse/openvswitch +++ /dev/null @@ -1,3 +0,0 @@ - -openvswitch -openvswitch-switch diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick deleted file mode 100644 index 67b33a9861..0000000000 --- a/files/rpms-suse/os-brick +++ /dev/null @@ -1,2 +0,0 @@ -lsscsi -open-iscsi diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt deleted file mode 120000 index 99fe353094..0000000000 --- a/files/rpms-suse/q-agt +++ /dev/null @@ -1 +0,0 @@ -neutron-agent \ No newline at end of file diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 deleted file mode 120000 index 0a5ca2a45f..0000000000 --- a/files/rpms-suse/q-l3 +++ /dev/null @@ -1 +0,0 @@ -neutron-l3 \ No newline at end of file diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift deleted file mode 100644 index 3663b98545..0000000000 --- a/files/rpms-suse/swift +++ /dev/null @@ -1,6 +0,0 @@ -curl -liberasurecode-devel -memcached -sqlite3 -xfsprogs -xinetd diff --git a/files/rpms/ceph b/files/rpms/ceph index 93b5746aa6..19f158fd57 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9,openEuler-20.03 +redhat-lsb-core # not:rhel9,openEuler-22.03 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 668705b1c3..6f4572c708 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -6,31 +6,33 @@ gcc gcc-c++ gettext # used for compiling message catalogs git-core +glibc-langpack-en # dist:rhel9 
graphviz # needed only for docs httpd httpd-devel +iptables-nft # dist:rhel9,rhel10 iptables-services -java-1.8.0-openjdk-headless +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel -make # dist:openEuler-20.03 mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed -pcre-devel # for python-pcre +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc python3-devel -python3-pip +python3-pip # not:openEuler-22.03 python3-systemd -redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376 -systemd-devel # dist:openEuler-20.03 +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 7ce5a72d6b..3d50f3a062 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,10 +1,9 @@ cryptsetup dosfstools -genisoimage # not:rhel9 iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils -xorriso # not:rhel8 +xorriso diff --git a/files/rpms/nova b/files/rpms/nova index 9e8621c628..d0f843bb60 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,16 +1,13 @@ conntrack-tools curl -dnsmasq # for q-dhcp -dnsmasq-utils # for dhcp_release ebtables -genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules # not:openEuler-20.03 +kernel-modules # not:openEuler-22.03 kpartx parted polkit rabbitmq-server # NOPRIME sqlite sudo -xorriso # not:rhel8 +xorriso diff --git a/files/rpms/swift b/files/rpms/swift index a838d7839e..c3921a47d4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,7 +1,6 @@ curl -liberasurecode-devel # not:openEuler-20.03 +liberasurecode-devel memcached rsync-daemon sqlite 
xfsprogs -xinetd # not:f35,rhel9 diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c49f716fa7..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6612] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6612.lock [account6622] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6622.lock [account6632] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6632.lock [account6642] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6642.lock [container6611] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6611.lock [container6621] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6621.lock [container6631] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6631.lock [container6641] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6641.lock [object6613] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6613.lock [object6623] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6623.lock [object6633] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock 
file = %SWIFT_DATA_DIR%/run/object6633.lock [object6643] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/functions b/functions index ccca5cda51..63c6318c2e 100644 --- a/functions +++ b/functions @@ -47,6 +47,10 @@ function short_source { # export it so child shells have access to the 'short_source' function also. export -f short_source +EXTRA_FILES_RETRY=${EXTRA_FILES_RETRY:-3} +EXTRA_FILES_RETRY_ERRORS=${EXTRA_FILES_RETRY_ERRORS:-"500,503"} +EXTRA_FILES_DOWNLOAD_TIMEOUT=${EXTRA_FILES_DOWNLOAD_TIMEOUT:-2} +EXTRA_FILES_RETRY_TIMEOUT=${EXTRA_FILES_RETRY_TIMEOUT:-10} # Download a file from a URL # # Will check cache (in $FILES) or download given URL. @@ -55,17 +59,20 @@ export -f short_source # # Will echo the local path to the file as the output. Will die on # failure to download. -# + # Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS # and tools/image_list.sh function get_extra_file { local file_url=$1 - - file_name=$(basename "$file_url") + local retry_args="--retry-on-host-error --retry-on-http-error=${EXTRA_FILES_RETRY_ERRORS} " + retry_args+="-t ${EXTRA_FILES_DOWNLOAD_TIMEOUT} --waitretry=${EXTRA_FILES_RETRY_TIMEOUT} " + retry_args+="--tries=${EXTRA_FILES_RETRY} --retry-connrefused" + # Using Bash parameter expansion (##*/) instead of external 'basename' + local file_name="${file_url##*/}" if [[ $file_url != file* ]]; then # If the file isn't cache, download it if [[ ! -f $FILES/$file_name ]]; then - wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name + wget --progress=dot:giga ${retry_args} -c $file_url -O $FILES/$file_name if [[ $? 
-ne 0 ]]; then die "$file_url could not be downloaded" fi @@ -74,7 +81,7 @@ function get_extra_file { return else # just strip the file:// bit and that's the path to the file - echo $file_url | sed 's/$file:\/\///g' + echo "${file_url#file://}" fi } @@ -118,7 +125,7 @@ function _upload_image { useimport="--import" fi - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}" + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}") } # Retrieve an image from a URL and upload into Glance. @@ -133,17 +140,29 @@ function upload_image { local image image_fname image_name + local max_attempts=5 + # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images image_fname=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then - wget --progress=dot:giga -c $image_url -O $FILES/$image_fname - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + for attempt in `seq $max_attempts`; do + local rc=0 + wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$? + if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + # Signal failure to download to the caller, so they can fail early + return 1 + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done fi image="$FILES/${image_fname}" else @@ -414,10 +433,10 @@ function upload_image { # kernel for use when uploading the root filesystem. 
local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi @@ -683,6 +702,8 @@ function setup_colorized_logging { iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR } function setup_systemd_logging { @@ -704,6 +725,9 @@ function setup_systemd_logging { iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s 
%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" + + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR } function setup_standard_logging_identity { diff --git a/functions-common b/functions-common index b660245337..b3a4b57f01 100644 --- a/functions-common +++ b/functions-common @@ -43,13 +43,18 @@ declare -A -g GITREPO declare -A -g GITBRANCH declare -A -g GITDIR +# Systemd service file environment variables per service +declare -A -g SYSTEMD_ENV_VARS + KILL_PATH="$(which kill)" # Save these variables to .stackenv STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ KEYSTONE_SERVICE_URI \ LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ - HOST_IPV6 SERVICE_IP_VERSION" + HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION" + +OPENSTACKCLIENT_CONF_DIR=/etc/openstack # Saves significant environment variables to .stackenv for later use @@ -76,10 +81,10 @@ function write_clouds_yaml { # overridable. There is currently no usecase where doing so makes sense, so # it's not currently configurable. - CLOUDS_YAML=/etc/openstack/clouds.yaml + CLOUDS_YAML=${OPENSTACKCLIENT_CONF_DIR}/clouds.yaml - sudo mkdir -p $(dirname $CLOUDS_YAML) - sudo chown -R $STACK_USER /etc/openstack + sudo mkdir -p $OPENSTACKCLIENT_CONF_DIR + sudo chown -R $STACK_USER $OPENSTACKCLIENT_CONF_DIR CA_CERT_ARG='' if [ -f "$SSL_BUNDLE_FILE" ]; then @@ -236,6 +241,27 @@ function trueorfalse { $xtrace } +# bool_to_int +# +# Convert True|False to int 1 or 0 +# This function can be used to convert the output of trueorfalse +# to an int follow c conventions where false is 0 and 1 it true. 
+function bool_to_int { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [ -z $1 ]; then + die $LINENO "Bool value required" + fi + if [[ $1 == "True" ]] ; then + echo '1' + else + echo '0' + fi + $xtrace +} + + function isset { [[ -v "$1" ]] } @@ -380,9 +406,9 @@ function warn { # such as "install_package" further abstract things in better ways. # # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc -# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora) +# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora) # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` -# ``os_CODENAME`` - vendor's codename for release: ``xenial`` +# ``os_CODENAME`` - vendor's codename for release: ``jammy`` declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME @@ -399,7 +425,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb + sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -412,11 +438,11 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # CentOS Stream 9 does not provide lsb_release + # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" == "centos9" ]]; then + if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} - os_CODENAME="n/a" + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') os_VENDOR=$(echo $NAME | tr -d '[:space:]') else _ensure_lsb_release @@ -426,7 +452,7 @@ function GetOSVersion { os_VENDOR=$(lsb_release -i -s) fi - if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then os_PACKAGE="deb" else os_PACKAGE="rpm" @@ -444,36 +470,23 @@ 
declare -g DISTRO function GetDistro { GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \ - "$os_VENDOR" =~ (LinuxMint) ]]; then - # 'Everyone' refers to Ubuntu / Debian / Mint releases by + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by # the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif is_opensuse; then - DISTRO="opensuse-$os_RELEASE" - # Tumbleweed uses "n/a" as a codename, and the release is a datestring - # like 20180218, so not very useful. Leap however uses a release - # with a "dot", so for example 15.0 - [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \ - DISTRO="opensuse-tumbleweed" - elif is_suse_linux_enterprise; then - # just use major release - DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (RockyLinux) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then - # Drop the . release as we assume it's compatible - # XXX re-evaluate when we get RHEL10 - DISTRO="rhel${os_RELEASE::1}" + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. -f1) + DISTRO="rhel${MAJOR_VERSION}" elif [[ "$os_VENDOR" =~ (openEuler) ]]; then - # The DISTRO here is `openEuler-20.03`. While, actually only openEuler - # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs. DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. 
Setting a sensible DISTRO @@ -506,19 +519,8 @@ function is_arch { [[ "$(uname -m)" == "$1" ]] } -# Determine if current distribution is an Oracle distribution -# is_oraclelinux -function is_oraclelinux { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "OracleServer" ] -} - - # Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). +# (Fedora, RHEL, CentOS, Rocky, etc). # is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then @@ -529,43 +531,14 @@ function is_fedora { [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). 
-# is_suse -function is_suse { - is_opensuse || is_suse_linux_enterprise -} - - -# Determine if current distribution is an openSUSE distribution -# is_opensuse -function is_opensuse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (openSUSE) ]] -} - - -# Determine if current distribution is a SUSE Linux Enterprise (SLE) -# distribution -# is_suse_linux_enterprise -function is_suse_linux_enterprise { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (^SUSE) ]] -} - - # Determine if current distribution is an Ubuntu-based distribution # It will also detect non-Ubuntu but Debian-based distros # is_ubuntu @@ -576,6 +549,8 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } +# Determine if current distribution is an openEuler distribution +# is_openeuler function is_openeuler { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion @@ -646,8 +621,10 @@ function git_clone { echo "the project to the \$PROJECTS variable in the job definition." die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - # '--branch' can also take tags - git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest + cd $git_dest + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest @@ -657,7 +634,7 @@ function git_clone { # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - find $git_dest -name '*.pyc' -delete + sudo find $git_dest -name '*.pyc' -delete # handle git_ref accordingly to type (tag, branch) if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then @@ -784,7 +761,7 @@ function get_default_host_ip { if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; 
then host_ip="" # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} local host_ips host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip @@ -887,14 +864,9 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id - # Gets domain id domain_id=$( - # Gets domain id - openstack --os-cloud devstack-system-admin domain show $1 \ - -f value -c id 2>/dev/null || - # Creates new domain openstack --os-cloud devstack-system-admin domain create $1 \ - --description "$2" \ + --description "$2" --or-show \ -f value -c id ) echo $domain_id @@ -983,29 +955,22 @@ function _get_domain_args { # Usage: get_or_add_user_project_role [ ] function get_or_add_user_project_role { local user_role_id + local domain_args domain_args=$(_get_domain_args $4 $5) - # Gets user role id + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --project $3 \ + $domain_args user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --user $2 \ - --project $3 \ - $domain_args - user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --user $2 \ - --project $3 \ - $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) echo $user_role_id } @@ -1013,23 +978,18 @@ function get_or_add_user_project_role { # Usage: get_or_add_user_domain_role function get_or_add_user_domain_role { local user_role_id - # Gets user role id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --domain $3 user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --user $2 \ - --domain $3 - user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --user $2 \ - --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) + echo $user_role_id } @@ -1068,23 +1028,18 @@ function get_or_add_user_system_role { # Usage: get_or_add_group_project_role function get_or_add_group_project_role { local group_role_id - # Gets group role id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack role add $1 \ + --group $2 \ + --project $3 group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ - -f value) - if [[ -z "$group_role_id" ]]; then - # Adds role to group and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --group $2 \ - --project $3 - group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --group $2 \ - --project $3 \ - -f value) - fi + -f value -c Role) + echo $group_role_id } @@ -1170,6 +1125,12 @@ function is_ironic_enforce_scope { return 1 } +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + # Package Functions # ================= @@ -1186,8 +1147,6 @@ function _get_package_dir { pkg_dir=$base_dir/debs elif is_fedora; then pkg_dir=$base_dir/rpms - elif is_suse; then - pkg_dir=$base_dir/rpms-suse else exit_distro_not_supported "list of packages" fi @@ -1462,8 +1421,6 @@ function real_install_package { apt_get install "$@" elif is_fedora; then yum_install "$@" - elif is_suse; then - zypper_install "$@" else exit_distro_not_supported "installing packages" fi @@ -1505,8 +1462,6 @@ function uninstall_package { apt_get purge "$@" elif is_fedora; then sudo dnf remove -y "$@" ||: - elif is_suse; then - sudo zypper remove -y "$@" ||: else exit_distro_not_supported "uninstalling packages" fi @@ -1575,6 +1530,7 @@ function write_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local extra="" if [[ -n "$group" ]]; then extra="Group=$group" @@ -1583,11 +1539,15 @@ function write_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" 
"$command" iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1602,10 +1562,12 @@ function write_uwsgi_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local unitfile="$SYSTEMD_DIR/$service" mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" @@ -1616,6 +1578,9 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "NotifyAccess" "all" iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1663,10 +1628,20 @@ function _run_under_systemd { local systemd_service="devstack@$service.service" local group=$3 local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" + if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then + env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars" + fi if [[ "$command" =~ "uwsgi" ]] ; then - write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" else - write_user_unit_file $systemd_service "$cmd" "$group" "$user" + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" fi $SYSTEMCTL enable 
$systemd_service @@ -1687,18 +1662,20 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. -# run_process service "command-line" [group] [user] +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" function run_process { local service=$1 local command="$2" local group=$3 local user=$4 + local env_vars="$5" local name=$service time_start "run_process" if is_service_enabled $service; then - _run_under_systemd "$name" "$command" "$group" "$user" + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" fi time_stop "run_process" } @@ -2454,6 +2431,11 @@ function time_stop { _TIME_TOTAL[$name]=$(($total + $elapsed_time)) } +function install_openstack_cli_server { + export PATH=$TOP_DIR/files/openstack-cli-server:$PATH + run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server" +} + function oscwrap { local xtrace xtrace=$(set +o | grep xtrace) @@ -2549,6 +2531,11 @@ function clean_pyc_files { fi } +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/inc/ini-config b/inc/ini-config index 79936823d2..920d4775fa 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -189,6 +189,11 @@ function iniset { local option=$3 local value=$4 + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') + if [[ -z $section || -z $option ]]; then $xtrace return diff --git a/inc/meta-config b/inc/meta-config index be73b60800..1215bb8307 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -185,11 +185,15 @@ function 
merge_config_group { break fi dir=$(dirname $realconfigfile) - if [[ -d $dir ]]; then - merge_config_file $localfile $group $configfile - else - die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)" + + test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)" + + if ! [[ -e $dir ]] ; then + sudo mkdir -p $dir || die $LINENO "could not create the directory of $real_configfile ($configfile)" + sudo chown ${STACK_USER} $dir fi + + merge_config_file $localfile $group $configfile done done } diff --git a/inc/python b/inc/python index 9382d352dc..3969c1fa82 100644 --- a/inc/python +++ b/inc/python @@ -7,7 +7,6 @@ # External functions used: # - GetOSVersion # - is_fedora -# - is_suse # - safe_chown # Save trace setting @@ -33,6 +32,23 @@ function join_extras { # Python Functions # ================ +# Setup the global devstack virtualenvs and the associated environment +# updates. +function setup_devstack_virtualenv { + # We run devstack out of a global virtualenv. + if [[ ! -d $DEVSTACK_VENV ]] ; then + # Using system site packages to enable nova to use libguestfs. + # This package is currently installed via the distro and not + # available on pypi. + $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + # Get the path to the pip command. 
# get_pip_command function get_pip_command { @@ -61,9 +77,11 @@ function get_python_exec_prefix { fi $xtrace - local PYTHON_PATH=/usr/local/bin - is_suse && PYTHON_PATH=/usr/bin - echo $PYTHON_PATH + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" + else + echo "/usr/local/bin" + fi } # Wrapper for ``pip install`` that only installs versions of libraries @@ -168,15 +186,17 @@ function pip_install { if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" + elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then + # We have to check that the DEVSTACK_VENV exists because early + # devstack boostrapping needs to operate in a system context + # too bootstrap pip. Once pip is bootstrapped we create the + # global venv and can start to use it. + local cmd_pip=$DEVSTACK_VENV/bin/pip + local sudo_pip="env" + echo "Using python $PYTHON3_VERSION to install $package_dir" else local cmd_pip="python$PYTHON3_VERSION -m pip" - # See - # https://github.com/pypa/setuptools/issues/2232 - # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html - # this makes setuptools >=50 use the platform distutils. - # We only want to do this on global pip installs, not if - # installing in a virtualenv - local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib " + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" echo "Using python $PYTHON3_VERSION to install $package_dir" fi @@ -186,15 +206,11 @@ function pip_install { $xtrace - # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep - # the same behaviour of setuptools before version 25.0.0. - # related issue: https://github.com/pypa/pip/issues/3874 $sudo_pip \ http_proxy="${http_proxy:-}" \ https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \ $cmd_pip $upgrade \ $@ result=$? 
@@ -257,8 +273,7 @@ function use_library_from_git { function lib_installed_from_git { local name=$1 local safe_name - safe_name=$(python -c "from pkg_resources import safe_name; \ - print(safe_name('${name}'))") + safe_name=$(python -c "from packaging import canonicalize_name; print(canonicalize_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to # be smart about finding the remote of the git repo the package # was installed from. This doesn't work with zuul which clones @@ -383,6 +398,9 @@ function _setup_package_with_constraints_edit { # source we are about to do. local name name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + if [ -z $name ]; then + name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml) + fi $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ $REQUIREMENTS_DIR/upper-constraints.txt -- $name fi @@ -445,8 +463,11 @@ function setup_package { pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist - if [[ "$flags" == "-e" ]]; then - safe_chown -R $STACK_USER $1/*.egg-info + if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then + # egg-info is not created when project have pyproject.toml + if [ -d $1/*.egg-info ]; then + safe_chown -R $STACK_USER $1/*.egg-info + fi fi } @@ -466,14 +487,8 @@ function install_python { function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev - elif is_suse; then - install_package python3-devel python3-dbm elif is_fedora; then - if [ "$os_VENDOR" = "Fedora" ]; then - install_package python${PYTHON3_VERSION//.} - else - install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel - fi + install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip fi } diff --git a/inc/rootwrap b/inc/rootwrap index 2a6e4b648f..4c65440a4e 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -60,6 +60,11 @@ function configure_rootwrap { 
sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf + # Set up the rootwrap sudoers local tempfile tempfile=$(mktemp) diff --git a/lib/apache b/lib/apache index 02827d1f1b..b3379a7cde 100644 --- a/lib/apache +++ b/lib/apache @@ -44,10 +44,6 @@ elif is_fedora; then APACHE_NAME=httpd APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} - APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi APACHE_LOG_DIR="/var/log/${APACHE_NAME}" @@ -57,18 +53,16 @@ APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Enable apache mod and restart apache if it isn't already enabled. function enable_apache_mod { local mod=$1 + local should_restart=$2 # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Skip mod_version as it is not a valid mod to enable # on debuntu, instead it is built in. if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then sudo a2enmod $mod - restart_apache_server - fi - elif is_suse; then - if ! 
a2enmod -q $mod ; then - sudo a2enmod $mod - restart_apache_server + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi fi elif is_fedora; then # pass @@ -88,14 +82,14 @@ function install_apache_uwsgi { fi if is_ubuntu; then - local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" + local pkg_list="uwsgi uwsgi-plugin-python3" install_package ${pkg_list} # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall # into the install-from-source because the upstream packages # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: @@ -104,10 +98,6 @@ function install_apache_uwsgi { # Thus there is nothing else to do after this install install_package uwsgi \ uwsgi-plugin-python3 - elif [[ $os_VENDOR =~ openSUSE ]]; then - install_package uwsgi \ - uwsgi-python3 \ - apache2-mod_uwsgi else # Compile uwsgi from source. local dir @@ -125,16 +115,14 @@ function install_apache_uwsgi { sudo rm -rf $dir fi - if is_ubuntu || is_suse ; then - # we've got to enable proxy and proxy_uwsgi for this to work - sudo a2enmod proxy - sudo a2enmod proxy_uwsgi - elif is_fedora; then - # redhat is missing a nice way to turn on/off modules - echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ - | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf + if is_ubuntu; then + if ! a2query -m proxy || ! 
a2query -m proxy_uwsgi ; then + # we've got to enable proxy and proxy_uwsgi for this to work + sudo a2enmod proxy + sudo a2enmod proxy_uwsgi + restart_apache_server + fi fi - restart_apache_server } # install_apache_wsgi() - Install Apache server and wsgi module @@ -149,14 +137,14 @@ function install_apache_wsgi { install_package libapache2-mod-wsgi-py3 elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd python3-mod_wsgi + install_package httpd python${PYTHON3_VERSION}-mod_wsgi + # rpm distros dont enable httpd by default so enable it to support reboots. + sudo systemctl enable httpd # For consistency with Ubuntu, switch to the worker mpm, as # the default is event sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf - elif is_suse; then - install_package apache2 apache2-mod_wsgi else exit_distro_not_supported "apache wsgi installation" fi @@ -171,7 +159,7 @@ function install_apache_wsgi { # recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites' # files are 000-default.conf and default-ssl.conf. # -# On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. +# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. # # On RHEL and CentOS, things should hopefully work as in Fedora. 
# @@ -187,7 +175,7 @@ function apache_site_config_for { if is_ubuntu; then # Ubuntu 14.04 - Apache 2.4 echo $APACHE_CONF_DIR/${site}.conf - elif is_fedora || is_suse; then + elif is_fedora; then # fedora conf.d is only imported if it ends with .conf so this is approx the same local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" if [ -f $enabled_site_file ]; then @@ -205,7 +193,7 @@ function enable_apache_site { enable_apache_mod version if is_ubuntu; then sudo a2ensite ${site} - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if site already enabled or no site config exists if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then @@ -219,7 +207,7 @@ function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} || true - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if no site config exists if [[ -f ${enabled_site_file} ]]; then @@ -250,13 +238,17 @@ function restart_apache_server { restart_service $APACHE_NAME } +# write_uwsgi_config() - Create a new uWSGI config file function write_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 local http=$4 - local name="" - name=$(basename $wsgi) + local name=$5 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. @@ -271,39 +263,49 @@ function write_uwsgi_config { local socket="$socket_dir/${name}.socket" # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" - iniset "$file" uwsgi processes $API_WORKERS + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given. 
Note that the regex isn't exhaustive - neither Python modules + # nor Python variables can start with a number - but it's "good enough" + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,python3 + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true + iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
- iniset "$file" uwsgi lazy-apps true + iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then - iniset "$file" uwsgi http $http + iniset "$conf" uwsgi http $http else local apache_conf="" apache_conf=$(apache_site_config_for $name) - iniset "$file" uwsgi socket "$socket" - iniset "$file" uwsgi chmod-socket 666 - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi @@ -312,51 +314,62 @@ function write_uwsgi_config { # For services using chunked encoding, the only services known to use this # currently are Glance and Swift, we need to use an http proxy instead of # mod_proxy_uwsgi because the chunked encoding gets dropped. See: -# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 -# but that involves having apache buffer the request before sending it to -# uwsgi. +# https://github.com/unbit/uwsgi/issues/1540. function write_local_uwsgi_http_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 - name=$(basename $wsgi) + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. 
# always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi port=$(get_random_port) - iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" - iniset "$file" uwsgi processes $API_WORKERS + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,python3 - # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true - # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. 
- iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. - iniset "$file" uwsgi lazy-apps true - iniset "$file" uwsgi chmod-socket 666 - iniset "$file" uwsgi http-raw-body true - iniset "$file" uwsgi http-chunked-input true - iniset "$file" uwsgi http-auto-chunked true - iniset "$file" uwsgi http-keepalive false + iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false # Increase socket timeout for slow chunked uploads - iniset "$file" uwsgi socket-timeout 30 + iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t enable_apache_mod proxy enable_apache_mod proxy_http @@ -364,7 +377,7 @@ function write_local_uwsgi_http_config { apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } @@ -383,18 +396,24 @@ function write_local_proxy_http_config { echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } function remove_uwsgi_config { - local file=$1 + 
local conf=$1 local wsgi=$2 local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration name=$(basename $wsgi) - rm -rf $file + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + + rm -rf $conf disable_apache_site $name } diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..25c8e9a83f --- /dev/null +++ b/lib/atop @@ -0,0 +1,49 @@ +#!/bin/bash +# +# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + mkdir -p $LOGDIR/atop + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/lib/cinder b/lib/cinder index 52818a81eb..ca641c2949 100644 --- a/lib/cinder +++ b/lib/cinder @@ -43,6 +43,13 @@ GITDIR["python-cinderclient"]=$DEST/python-cinderclient GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder +if [[ $SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + # Cinder virtual environment if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["cinder"]=${CINDER_DIR}.venv @@ -52,10 +59,11 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} 
+OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi +CINDER_UWSGI=cinder.wsgi.api:application CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini @@ -69,6 +77,11 @@ CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} + # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh # volume group, otherwise it will do thick. The other valid choices are @@ -76,6 +89,10 @@ CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $S # thin provisioning. CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external REST APIs like Glance. 
+CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) + # Default backends # The backend format is type:name where type is one of the supported backend # types (lvm, nfs, etc) and name is the identifier used in the Cinder @@ -88,13 +105,33 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Default to lioadm -CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} -# EL and SUSE should only use lioadm -if is_fedora || is_suse; then - if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then - die "lioadm is the only valid Cinder target_helper config on this platform" +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' + fi +fi +CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} + +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} +else + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} +fi + + +# EL should only use lioadm +if is_fedora; then + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" fi fi @@ -124,10 +161,6 @@ fi # Supported backup drivers are in lib/cinder_backups 
CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} -# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi -# reference should be cleaned up to more accurately refer to uwsgi. -CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} - # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then for be in ${CINDER_ENABLED_BACKENDS//,/ }; do @@ -151,6 +184,12 @@ fi # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} + +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} + # For limits, if left unset, it will use cinder defaults of 0 for unlimited CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} @@ -160,6 +199,11 @@ CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} # enable the cache for all cinder backends. CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} + # Flag to set the oslo_policy.enforce_scope. This is used to switch # the Volume API policies to start checking the scope of token. by default, # this flag is False. 
@@ -187,7 +231,7 @@ function _cinder_cleanup_apache_wsgi { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then local targets targets=$(sudo tgtadm --op show --mode target) if [ $? -ne 0 ]; then @@ -215,8 +259,14 @@ function cleanup_cinder { else stop_service tgtd fi - else + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -237,7 +287,7 @@ function cleanup_cinder { fi stop_process "c-api" - remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" } # configure_cinder() - Set config files, create data dirs, etc @@ -254,32 +304,18 @@ function configure_cinder { cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI - inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host - inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port - inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol - inicomment $CINDER_API_PASTE_INI filter:authtoken cafile - inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name - inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user - inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password - inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir - configure_keystone_authtoken_middleware $CINDER_CONF cinder iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF DEFAULT target_helper 
"$CINDER_TARGET_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions - iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then @@ -296,6 +332,9 @@ function configure_cinder { # details and example failures. iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" @@ -316,6 +355,14 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} fi configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from source volume hence can only be + # performed when glance is using cinder as it's backend. 
+ if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi fi if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then @@ -333,14 +380,7 @@ function configure_cinder { if is_service_enabled tls-proxy; then if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then # Set the service port for a proxy to take the original - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True - else - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - fi + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True fi fi @@ -351,80 +391,81 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF # Format logging - setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI + setup_logging $CINDER_CONF if is_service_enabled c-api; then - write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver fi - iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" - iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" if is_service_enabled tls-proxy; then iniset $CINDER_CONF DEFAULT glance_protocol https iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + # Set nova credentials (used for os-assisted-snapshots) - configure_keystone_authtoken_middleware $CINDER_CONF nova nova + 
configure_keystoneauth $CINDER_CONF nova nova iniset $CINDER_CONF nova region_name "$REGION_NAME" iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" elif is_service_enabled etcd3; then - iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" fi if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $CINDER_CONF oslo_policy enforce_scope true iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false + fi + + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf fi } # create_cinder_accounts() - Set up common required cinder accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service cinder admin # if enabled +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) # Migrated from keystone_data.sh function create_cinder_accounts { # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - create_service_user "cinder" + local extra_role="" - # block-storage is the official service type - get_or_create_service "cinder" "block-storage" "Cinder Volume Service" - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - 
"$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - else - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") fi + create_service_user "cinder" $extra_role + + local cinder_api_url + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" + + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$cinder_api_url/v3" configure_cinder_internal_tenant fi } @@ -461,13 +502,28 @@ function init_cinder { mkdir -p $CINDER_STATE_PATH/volumes } + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi +} + # install_cinder() - Collect source and prepare function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR - if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then install_package tgt - elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then if is_ubuntu; then # TODO(frickler): Workaround for 
https://launchpad.net/bugs/1819819 sudo mkdir -p /etc/target @@ -476,6 +532,43 @@ function install_cinder { else install_package targetcli fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli + + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! 
sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi fi } @@ -505,25 +598,13 @@ function _configure_tgt_for_config_d { # start_cinder() - Start running processes function start_cinder { - local service_port=$CINDER_SERVICE_PORT - local service_protocol=$CINDER_SERVICE_PROTOCOL - local cinder_url - if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - service_port=$CINDER_SERVICE_PORT_INT - service_protocol="http" - fi - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d if is_ubuntu; then sudo service tgt restart - elif is_suse; then - # NOTE(dmllr): workaround restart bug - # https://bugzilla.suse.com/show_bug.cgi?id=934642 - stop_service tgtd - start_service tgtd else restart_service tgtd fi @@ -532,28 +613,24 @@ function start_cinder { fi fi - if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - cinder_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tls enabled - if is_service_enabled tls-proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT - fi - else - run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" - cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 - fi - fi + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" echo "Waiting for Cinder API to start..." + # Check that the cinder API service is running, + local cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST/volume/v3/ + if ! 
wait_for_service $SERVICE_TIMEOUT $cinder_url; then die $LINENO "c-api did not start" fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" - run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" - run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + # Tune glibc for Python Services using single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" # NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler @@ -568,6 +645,23 @@ function stop_cinder { stop_process c-vol } +function create_one_type { + type_name=$1 + property_key=$2 + property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name + else + # TODO (e0ne): use openstack client once it will support cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST/volume/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set 
$property_key="$property_value" + fi +} + # create_volume_types() - Create Cinder's configured volume types function create_volume_types { # Create volume types @@ -575,19 +669,13 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode - if is_service_enabled keystone; then - openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} - else - # TODO (e0ne): use openstack client once it will support cinder in noauth mode: - # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 - local cinder_url - cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name} - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} - fi + create_one_type $be_name "volume_backend_name" $be_name done + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH " True" + fi + # Increase quota for the service project if glance is using cinder, # since it's likely to occasionally go above the default 10 in parallel # test execution. @@ -631,6 +719,23 @@ function configure_cinder_image_volume_cache { done } +function configure_cinder_volume_upload { + # Expect UPLOAD_VOLUME_OPTIMIZED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. 
+ local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} + +function init_cinder_service_user_conf { + iniset $CINDER_CONF service_user send_service_user_token True + configure_keystoneauth $CINDER_CONF cinder service_user +} # Restore xtrace $_XTRACE_CINDER diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 0b465730c0..adcff1883b 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -40,7 +40,6 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name rbd_secret_uuid "$CINDER_CEPH_UUID" iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 - iniset $CINDER_CONF DEFAULT glance_api_version 2 } # Restore xtrace diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi index 94412e0da6..10806f26e0 100644 --- a/lib/cinder_backends/ceph_iscsi +++ b/lib/cinder_backends/ceph_iscsi @@ -43,7 +43,6 @@ function configure_cinder_backend_ceph_iscsi { iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 - iniset $CINDER_CONF DEFAULT glance_api_version 2 pip_install rbd-iscsi-client } diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate index 3ffd9a6785..3b9f1d1164 100644 --- a/lib/cinder_backends/fake_gate +++ b/lib/cinder_backends/fake_gate @@ -50,7 +50,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper 
"$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index e03ef14c55..42865119da 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -50,7 +50,10 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs index 89a37a1f02..f3fcbeff19 100644 --- a/lib/cinder_backends/nfs +++ b/lib/cinder_backends/nfs @@ -32,6 +32,15 @@ function configure_cinder_backend_nfs { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" + iniset $CINDER_CONF $be_name nas_host localhost + iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR} + iniset $CINDER_CONF $be_name nas_secure_file_operations \ + ${NFS_SECURE_FILE_OPERATIONS} + iniset $CINDER_CONF $be_name nas_secure_file_permissions \ + ${NFS_SECURE_FILE_PERMISSIONS} + + # NFS snapshot support is currently opt-in only. 
+ iniset $CINDER_CONF $be_name nfs_snapshot_support True echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" } diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index e4003c0720..e4d6b96407 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -19,6 +19,7 @@ set +o xtrace # Defaults # -------- +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} @@ -26,15 +27,19 @@ CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} function configure_cinder_backup_ceph { - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee 
${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 diff --git a/lib/databases/mysql b/lib/databases/mysql index 6b3ea0287c..4def1842a7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -18,15 +18,9 @@ register_database mysql if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql - if is_fedora && ! is_oraclelinux; then + if is_fedora; then MYSQL_SERVICE_NAME=mariadb - elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then - # Older mariadb packages on SLES 12 provided mysql.service. The - # newer ones on SLES 12 and 15 use mariadb.service; they also - # provide a mysql.service symlink for backwards-compatibility, but - # let's not rely on that. 
- MYSQL_SERVICE_NAME=mariadb - elif [[ "$DISTRO" == "bullseye" ]]; then + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then MYSQL_SERVICE_NAME=mariadb fi fi @@ -50,15 +44,9 @@ function cleanup_database_mysql { apt_get purge -y mysql* mariadb* sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql - return - elif is_oraclelinux; then - uninstall_package mysql-community-server - sudo rm -rf /var/lib/mysql - elif is_suse || is_fedora; then + elif is_fedora; then uninstall_package mariadb-server sudo rm -rf /var/lib/mysql - else - return fi } @@ -69,13 +57,11 @@ function recreate_database_mysql { } function configure_database_mysql { - local my_conf mysql slow_log + local my_conf mysql slow_log my_client_conf echo_summary "Configuring and starting MySQL" if is_ubuntu; then my_conf=/etc/mysql/my.cnf - elif is_suse || is_oraclelinux; then - my_conf=/etc/my.cnf elif is_fedora; then my_conf=/etc/my.cnf local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf @@ -86,11 +72,20 @@ function configure_database_mysql { exit_distro_not_supported "mysql configuration" fi + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + # Change bind-address from localhost (127.0.0.1) to any (::) iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" # (Re)Start mysql-server - if is_fedora || is_suse; then + if is_fedora; then # service is not started by default start_service $MYSQL_SERVICE_NAME elif is_ubuntu; then @@ -98,10 +93,22 @@ function configure_database_mysql { restart_service $MYSQL_SERVICE_NAME fi + # MariaDB 10.4+ on modern Debian/Ubuntu uses unix_socket auth by default + # See https://mariadb.org/authentication-in-mariadb-10-4/ + local use_mariadb_socket_auth=False + if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + if [[ ! 
"$DISTRO" =~ bookworm|bullseye ]]; then + use_mariadb_socket_auth=True + fi + fi + # Set the root password - only works the first time. For Ubuntu, we already # did that with debconf before installing the package, but we still try, - # because the package might have been installed already. - sudo mysqladmin -u root password $DATABASE_PASSWORD || true + # because the package might have been installed already. We don't do this + # for MariaDB with socket auth because the root password is managed differently. + if [[ "$use_mariadb_socket_auth" != "True" ]]; then + sudo mysqladmin -u root password $DATABASE_PASSWORD || true + fi # In case of Mariadb, giving hostname in arguments causes permission # problems as it expects connection through socket @@ -111,17 +118,32 @@ function configure_database_mysql { local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " fi - # In mariadb e.g. on Ubuntu socket plugin is used for authentication - # as root so it works only as sudo. To restore old "mysql like" behaviour, - # we need to change auth plugin for root user - if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then - sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" - sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME + fi + + # Configure database user authentication + if [[ "$use_mariadb_socket_auth" == "True" ]]; then + # Allow both unix_socket (for sudo mysql) and password auth + # Using OR allows restacking without needing to reset auth in 
unstack + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA unix_socket OR mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + fi + + # Create remote access user and grant privileges (needed for all distros) + if [[ "$use_mariadb_socket_auth" == "True" ]]; then + # Use sudo mysql since we have socket auth + sudo mysql -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + sudo mysql -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" + else + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" fi - # Create DB user if it does not already exist - sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" - # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" # Now update ``my.cnf`` for some local needs and restart the mysql service @@ -151,12 +173,26 @@ function configure_database_mysql { fi if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then - echo "enabling MySQL performance_schema items" - # Enable long query history - iniset -sudo $my_conf mysqld \ - performance-schema-consumer-events-statements-history-long TRUE - iniset -sudo $my_conf mysqld \ - performance_schema_events_stages_history_long_size 1000000 + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats + fi + + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 64K + iniset -sudo 
$my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M fi restart_service $MYSQL_SERVICE_NAME @@ -190,14 +226,9 @@ EOF fi # Install mysql-server if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then - if is_oraclelinux; then - install_package mysql-community-server - elif is_fedora; then + if is_fedora; then install_package mariadb-server mariadb-devel mariadb sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_suse; then - install_package mariadb-server - sudo systemctl enable $MYSQL_SERVICE_NAME elif is_ubuntu; then install_package $MYSQL_SERVICE_NAME-server else @@ -218,7 +249,17 @@ function install_database_python_mysql { function database_connection_url_mysql { local db=$1 - echo "$BASE_SQL_CONN/$db?charset=utf8" + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" } diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 4f0a5a0a4c..2aa38ccf76 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -32,7 +32,7 @@ function cleanup_database_postgresql { # Get ruthless with mysql apt_get purge -y postgresql* return - elif is_fedora || is_suse; then + elif is_fedora; then uninstall_package postgresql-server else return @@ -46,6 +46,10 @@ function recreate_database_postgresql { createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db } +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log +} + function configure_database_postgresql { local pg_conf pg_dir pg_hba check_role version echo_summary "Configuring and 
starting PostgreSQL" @@ -53,7 +57,9 @@ function configure_database_postgresql { pg_hba=/var/lib/pgsql/data/pg_hba.conf pg_conf=/var/lib/pgsql/data/postgresql.conf if ! sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT sudo postgresql-setup initdb + trap - EXIT fi elif is_ubuntu; then version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` @@ -66,11 +72,6 @@ function configure_database_postgresql { pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` pg_hba=$pg_dir/pg_hba.conf pg_conf=$pg_dir/postgresql.conf - elif is_suse; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $pg_hba ] || start_service postgresql else exit_distro_not_supported "postgresql configuration" fi @@ -107,7 +108,7 @@ EOF if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then if is_ubuntu; then install_package postgresql - elif is_fedora || is_suse; then + elif is_fedora; then install_package postgresql-server if is_fedora; then sudo systemctl enable postgresql diff --git a/lib/dstat b/lib/dstat index eb03ae0fb2..9bd0370847 100644 --- a/lib/dstat +++ b/lib/dstat @@ -33,19 +33,25 @@ function start_dstat { # To enable memory_tracker add: # enable_service memory_tracker # to your localrc - run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" "PYTHON=python${PYTHON3_VERSION}" # TODO(jh): Fail when using the old service name otherwise consumers might # never notice that is has been removed. 
if is_service_enabled peakmem_tracker; then die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" fi + + # To enable file_tracker add: + # enable_service file_tracker + # to your localrc + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat stop_process memory_tracker + stop_process file_tracker } # Restore xtrace diff --git a/lib/etcd3 b/lib/etcd3 index 4f3a7a4349..0d22de8c73 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -51,7 +51,7 @@ function start_etcd3 { fi cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then - cmd+=" --debug" + cmd+=" --log-level=debug" fi local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" diff --git a/lib/glance b/lib/glance index ba98f4133e..9422c22141 100644 --- a/lib/glance +++ b/lib/glance @@ -41,15 +41,20 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +#S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + # Cinder for Glance USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # GLANCE_CINDER_DEFAULT_BACKEND should be one of the values # from CINDER_ENABLED_BACKENDS GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance -# NOTE (abhishekk): For opensuse data files are stored in different directory -if is_opensuse; then - GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance fi # When Cinder is used as a glance store, you can optionally configure cinder to # optimize bootable volume creation by allowing volumes to be cloned directly @@ -76,13 +81,7 @@ 
GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} - -# Full Glance functionality requires running in standalone mode. If we are -# not in uwsgi mode, then we are standalone, otherwise allow separate control. -if [[ "$WSGI_MODE" != "uwsgi" ]]; then - GLANCE_STANDALONE=True -fi -GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES @@ -99,10 +98,13 @@ GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) # Flag to set the oslo_policy.enforce_scope. This is used to switch -# the Image API policies to start checking the scope of token. By Default, -# this flag is False. +# This is used to disable the Image API policies scope and new defaults. +# By Default, it is True. 
# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE) +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) + +# Flag to disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -124,20 +126,13 @@ GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api +GLANCE_UWSGI=glance.wsgi.api:application GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack -GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000} +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} -# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet -# TODO(mtreinish): Remove the eventlet path here and in all the similar -# conditionals below after the Pike release -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" -else - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" -fi +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" # Functions # --------- @@ -168,6 +163,35 @@ function cleanup_glance { # Cleanup reserved stores directories sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" +} + +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend 
$GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi } # Set multiple cinder store related config options for each of the cinder store @@ -254,7 +278,6 @@ function configure_glance_store { local be if [[ "$glance_enable_multiple_stores" == "False" ]]; then - # Configure traditional glance_store if [[ "$use_cinder_for_glance" == "True" ]]; then # set common glance_store parameters iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" @@ -277,7 +300,7 @@ function configure_glance_store { if [[ "$use_cinder_for_glance" == "True" ]]; then # Configure multiple cinder stores for glance configure_multiple_cinder_stores - else + elif ! 
is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then # Configure multiple file stores for glance configure_multiple_file_stores fi @@ -330,6 +353,7 @@ function configure_glance { iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_keystone_authtoken_middleware $GLANCE_API_CONF glance @@ -341,6 +365,7 @@ function configure_glance { # Only use these if you know what you are doing! See OSSN-0065 iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT # Configure glance_store configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES @@ -354,8 +379,15 @@ function configure_glance { # No multiple stores for swift yet if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then + # Return if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. 
iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True @@ -373,6 +405,12 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi + fi fi # We need to tell glance what it's public endpoint is so that the version @@ -393,21 +431,17 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ # Set default configuration options for the glance-image-import - iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins [] + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON - cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR if is_service_enabled tls-proxy; then @@ -418,24 +452,24 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - write_local_uwsgi_http_config 
"$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" - # Grab our uwsgi listen address and use that to fill out our - # worker_self_reference_url config - iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \ - $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \ - $GLANCE_UWSGI_CONF) - else - write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" - fi + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt the uwsgi binary instead. + iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $GLANCE_API_CONF oslo_policy enforce_scope true iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + else + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false fi } @@ -450,7 +484,9 @@ function configure_glance { function create_glance_accounts { if is_service_enabled g-api; then - create_service_user "glance" + # When cinder talk to glance service APIs user needs service + # role for RBAC checks and admin role for cinder to access images. 
+ create_service_user "glance" "admin" # required for swift access if is_service_enabled s-proxy; then @@ -472,6 +508,13 @@ function create_glance_accounts { configure_glance_quotas fi + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi fi } @@ -540,7 +583,7 @@ function glance_remote_conf { # start_glance_remote_clone() - Clone the regular glance api worker function start_glance_remote_clone { local glance_remote_conf_dir glance_remote_port remote_data - local glance_remote_uwsgi + local glance_remote_uwsgi venv glance_remote_conf_dir="$(glance_remote_conf "")" glance_remote_port=$(get_random_port) @@ -578,12 +621,16 @@ function start_glance_remote_clone { # We need to create the systemd service for the clone, but then # change it to include an Environment line to point the WSGI app # at the alternate config directory. 
+ if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ - --ini $glance_remote_uwsgi" \ + --ini $glance_remote_uwsgi \ + $venv" \ "" "$STACK_USER" - iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ "Service" "Environment" \ "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" @@ -600,17 +647,8 @@ function start_glance_remote_clone { # start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - if [[ "$WSGI_MODE" != "uwsgi" ]]; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT - fi - fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" - else - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" - fi + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" if is_service_enabled g-api-r; then echo "Starting the g-api-r clone service..." 
diff --git a/lib/horizon b/lib/horizon index b2bf7bcb49..7c0d443aa6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -109,12 +109,21 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" fi + if is_service_enabled c-bak; then + _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True" + fi + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole local horizon_conf horizon_conf=$(apache_site_config_for horizon) + local wsgi_venv_config="" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV" + fi + # Configure apache to run horizon # Set up the django horizon application to serve via apache/wsgi sudo sh -c "sed -e \" @@ -124,12 +133,13 @@ function configure_horizon { s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; + s,%WSGIPYTHONHOME%,$wsgi_venv_config,g; \" $FILES/apache-horizon.template >$horizon_conf" if is_ubuntu; then disable_apache_site 000-default sudo touch $horizon_conf - elif is_fedora || is_suse; then + elif is_fedora; then : # nothing to do else exit_distro_not_supported "horizon apache configuration" @@ -163,6 +173,10 @@ function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi + # Install the memcache library so that horizon can use memcached as its + # cache backend + pip_install_gr pymemcache + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH } diff --git a/lib/host b/lib/host new file mode 100644 index 0000000000..58062eff6b --- /dev/null +++ b/lib/host @@ -0,0 +1,98 @@ +#!/bin/bash + +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage. 
The drawback is there is higher cpu load; however, we tend to +# be memory bound not cpu bound so enable KSM by default but allow people +# to opt out if the CPU time is more important to them. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) +ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED) +function configure_ksm { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + install_package "ksmtuned" + fi + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run + fi +} + +# Compressed swap (ZSWAP) +#------------------------ + +# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html +# Zswap is a lightweight compressed cache for swap pages. +# It takes pages that are in the process of being swapped out and attempts +# to compress them into a dynamically allocated RAM-based memory pool. +# zswap basically trades CPU cycles for potentially reduced swap I/O. +# This trade-off can also result in a significant performance improvement +# if reads from the compressed cache are faster than reads from a swap device. + +ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) +# lz4 is very fast although it does not have the best compression +# zstd has much better compression but more latency +ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"} +function configure_zswap { + if [[ $ENABLE_ZSWAP == "True" ]] ; then + # Centos 9 stream seems to only support enabling but not run time + # tuning so dont try to choose better default on centos + if is_ubuntu; then + echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor + echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool + fi + echo 1 | sudo tee /sys/module/zswap/parameters/enabled + # print curent zswap kernel config + sudo grep -R . 
/sys/module/zswap/parameters || /bin/true + fi +} + +ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING) +function configure_sysctl_mem_parmaters { + if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then + # defer write when memory is available + sudo sysctl -w vm.dirty_ratio=60 + sudo sysctl -w vm.dirty_background_ratio=10 + sudo sysctl -w vm.vfs_cache_pressure=50 + # assume swap is compressed so on new kernels + # give it equal priority as page cache which is + # uncompressed. on kernels < 5.8 the max is 100 + # not 200 so it will strongly prefer swapping. + sudo sysctl -w vm.swappiness=100 + sudo grep -R . /proc/sys/vm/ || /bin/true + fi +} + +function configure_host_mem { + configure_zswap + configure_ksm + configure_sysctl_mem_parmaters +} + +ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING) +function configure_sysctl_net_parmaters { + if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then + # detect dead TCP connections after 120 seconds + sudo sysctl -w net.ipv4.tcp_keepalive_time=60 + sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10 + sudo sysctl -w net.ipv4.tcp_keepalive_probes=6 + # reudce network latency for new connections + sudo sysctl -w net.ipv4.tcp_fastopen=3 + # print tcp options + sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true + # disable qos by default + sudo sysctl -w net.core.default_qdisc=pfifo_fast + fi +} + +function configure_host_net { + configure_sysctl_net_parmaters +} + +function tune_host { + configure_host_mem + configure_host_net +} diff --git a/lib/infra b/lib/infra index b983f2b739..f4760c352c 100644 --- a/lib/infra +++ b/lib/infra @@ -31,7 +31,7 @@ function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. 
- PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core] PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped diff --git a/lib/keystone b/lib/keystone index 80a136f78d..791abeb0d7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -11,13 +11,11 @@ # - ``FILES`` # - ``BASE_SQL_CONN`` # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` -# - ``S3_SERVICE_PORT`` (template backend only) # ``stack.sh`` calls the entry points in this order: # # - install_keystone # - configure_keystone -# - _config_keystone_apache_wsgi # - init_keystone # - start_keystone # - bootstrap_keystone @@ -49,16 +47,7 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini -KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public - -# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: -# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi -# - uwsgi : Run keystone under uwsgi -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - KEYSTONE_DEPLOY=uwsgi -else - KEYSTONE_DEPLOY=mod_wsgi -fi +KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -144,47 +133,9 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - # These files will be created if we are running WSGI_MODE="mod_wsgi" - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) - else - stop_process "keystone" - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - sudo rm -f $(apache_site_config_for keystone-wsgi-public) - fi -} - -# 
_config_keystone_apache_wsgi() - Set WSGI config files of Keystone -function _config_keystone_apache_wsgi { - local keystone_apache_conf - keystone_apache_conf=$(apache_site_config_for keystone) - keystone_ssl_listen="#" - local keystone_ssl="" - local keystone_certfile="" - local keystone_keyfile="" - local keystone_service_port=$KEYSTONE_SERVICE_PORT - local venv_path="" - - if is_service_enabled tls-proxy; then - keystone_service_port=$KEYSTONE_SERVICE_PORT_INT - fi - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" - fi - - sudo cp $FILES/apache-keystone.template $keystone_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$keystone_service_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%SSLLISTEN%|$keystone_ssl_listen|g; - s|%SSLENGINE%|$keystone_ssl|g; - s|%SSLCERTFILE%|$keystone_certfile|g; - s|%SSLKEYFILE%|$keystone_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%KEYSTONE_BIN%|$KEYSTONE_BIN_DIR|g - " -i $keystone_apache_conf + stop_process "keystone" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) } # configure_keystone() - Set config files, create data dirs, etc @@ -210,14 +161,11 @@ function configure_keystone { iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS - iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. 
+ iniset $KEYSTONE_CONF api response_validation error - local service_port=$KEYSTONE_SERVICE_PORT - - if is_service_enabled tls-proxy; then - # Set the service ports for a proxy to take the originals - service_port=$KEYSTONE_SERVICE_PORT_INT - fi + iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications # Override the endpoints advertised by keystone so that clients use the correct # endpoint. By default, the keystone server uses the public_port which isn't @@ -241,12 +189,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" - _config_keystone_apache_wsgi - else # uwsgi - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" - fi + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -265,10 +208,15 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $KEYSTONE_CONF oslo_policy enforce_scope true iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true - iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + else + iniset $KEYSTONE_CONF oslo_policy enforce_scope false + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false fi } @@ -436,17 +384,13 @@ function create_service_user { fi } -# Configure a service to use the auth token middleware. 
+# Configure options for keystoneauth # -# configure_keystone_authtoken_middleware conf_file admin_user IGNORED [section] -# -# section defaults to keystone_authtoken, which is where auth_token looks in -# the .conf file. If the paste config file is used (api-paste.ini) then -# provide the section name for the auth_token filter. -function configure_keystone_authtoken_middleware { +# configure_keystoneauth conf_file admin_user section +function configure_keystoneauth { local conf_file=$1 local admin_user=$2 - local section=${3:-keystone_authtoken} + local section=$3 iniset $conf_file $section auth_type password iniset $conf_file $section interface public @@ -456,9 +400,28 @@ function configure_keystone_authtoken_middleware { iniset $conf_file $section user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf_file $section project_name $SERVICE_PROJECT_NAME iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" +} +# Configure a service to use the auth token middleware. +# +# configure_keystone_authtoken_middleware conf_file admin_user [section] +# +# section defaults to keystone_authtoken, which is where auth_token looks in +# the .conf file. If the paste config file is used (api-paste.ini) then +# provide the section name for the auth_token filter. 
+function configure_keystone_authtoken_middleware { + local conf_file=$1 + local admin_user=$2 + local section=${3:-keystone_authtoken} + local service_type=$4 + + configure_keystoneauth $conf_file $admin_user $section + iniset $conf_file $section www_authenticate_uri $KEYSTONE_SERVICE_URI iniset $conf_file $section cafile $SSL_BUNDLE_FILE iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi } # configure_auth_token_middleware conf_file admin_user IGNORED [section] @@ -538,36 +501,17 @@ function install_keystone { if is_service_enabled ldap; then setup_develop $KEYSTONE_DIR ldap fi - - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - install_apache_wsgi - fi } # start_keystone() - Start running processes function start_keystone { - # Get right service port for testing - local service_port=$KEYSTONE_SERVICE_PORT - local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$KEYSTONE_SERVICE_PORT_INT - auth_protocol="http" - fi - - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - enable_apache_site keystone - restart_apache_server - else # uwsgi - run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" - fi + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" echo "Waiting for keystone to start..." # Check that the keystone service is running. Even if the tls tunnel # should be enabled, make sure the internal port is checked using - # unencryted traffic at this point. - # If running in Apache, use the path rather than port. - - local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ + # unencryted traffic at this point (ignore KEYSTONE_SERVICE_PROTOCOL). + local service_uri=http://$KEYSTONE_SERVICE_HOST/identity/v3/ if ! 
wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" @@ -584,12 +528,7 @@ function start_keystone { # stop_keystone() - Stop running processes function stop_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - disable_apache_site keystone - restart_apache_server - else - stop_process keystone - fi + stop_process keystone } # bootstrap_keystone() - Initialize user, role and project @@ -621,7 +560,7 @@ function bootstrap_keystone { # create_ldap_domain() - Create domain file and initialize domain with a user function create_ldap_domain { # Creates domain Users - openstack --os-identity-api-version=3 domain create --description "LDAP domain" Users + openstack domain create --description "LDAP domain" Users # Create domain file inside etc/keystone/domains KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf @@ -637,6 +576,7 @@ function create_ldap_domain { iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN diff --git a/lib/ldap b/lib/ldap index ea5faa1fe9..66c2afc4d5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -39,13 +39,6 @@ elif is_fedora; then LDAP_OLCDB_NUMBER=2 LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add -elif is_suse; then - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - LDAP_OLCDB_NUMBER=1 - LDAP_OLCDB_TYPE=hdb - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap fi @@ -76,8 +69,6 @@ function cleanup_ldap { sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap elif is_fedora; then sudo rm -rf /etc/openldap /var/lib/ldap - elif is_suse; then - sudo rm -rf /var/lib/ldap fi } @@ -91,6 +82,14 @@ 
function init_ldap { # Remove data but not schemas clear_ldap_state + if is_ubuntu; then + # a bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd crash + # after deleting nonexisting tree. It is fixed upstream, but Ubuntu is + # still not having a fix in Noble. Try temporarily simly restarting the + # process. + sudo service $LDAP_SERVICE_NAME restart + fi # Add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then @@ -126,11 +125,6 @@ function install_ldap { configure_ldap elif is_fedora; then start_ldap - elif is_suse; then - _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif - sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap - start_ldap fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..ffc004b24c 100755 --- a/lib/libraries +++ b/lib/libraries @@ -1,6 +1,6 @@ #!/bin/bash # -# lib/oslo +# lib/libraries # # Functions to install libraries from git # @@ -27,6 +27,7 @@ GITDIR["castellan"]=$DEST/castellan GITDIR["cliff"]=$DEST/cliff GITDIR["cursive"]=$DEST/cursive GITDIR["debtcollector"]=$DEST/debtcollector +GITDIR["etcd3gw"]=$DEST/etcd3gw GITDIR["futurist"]=$DEST/futurist GITDIR["openstacksdk"]=$DEST/openstacksdk GITDIR["os-client-config"]=$DEST/os-client-config @@ -131,13 +132,12 @@ function install_libs { # python client libraries we might need from git can go here _install_lib_from_source "python-barbicanclient" - - # etcd (because tooz does not have a hard dependency on these) - # - # NOTE(sdague): this is currently a work around because tooz - # doesn't pull in etcd3. 
- pip_install etcd3 - pip_install etcd3gw + if use_library_from_git etcd3gw ; then + _install_lib_from_source "etcd3gw" + else + # etcd (because tooz does not have a hard dependency on these) + pip_install etcd3gw + fi } # Restore xtrace diff --git a/lib/lvm b/lib/lvm index d3f6bf1792..b7e84d9505 100644 --- a/lib/lvm +++ b/lib/lvm @@ -129,19 +129,25 @@ function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the tgtd service on Fedora and SUSE if tgtadm is used - if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then start_service tgtd fi # Start with a clean volume group _create_lvm_volume_group $vg $size - # Remove iscsi targets - if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then - sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - else - sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi fi _clean_lvm_volume_group $vg } @@ -194,7 +200,7 @@ function set_lvm_filter { filter_string=$filter_string$filter_suffix clean_lvm_filter - sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf echo_summary "set lvm.conf device global_filter to: $filter_string" } diff --git a/lib/neutron b/lib/neutron index f24ccfb1a9..6a7f057ad7 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,122 +1,302 @@ 
#!/bin/bash # # lib/neutron -# Install and start **Neutron** network services +# functions - functions specific to neutron # Dependencies: -# # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # -# - is_XXXX_enabled -# - install_XXXX -# - configure_XXXX -# - init_XXXX -# - start_XXXX -# - stop_XXXX -# - cleanup_XXXX +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - configure_neutron_after_post_config +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs -# Defaults + +# Neutron Networking +# ------------------ + +# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. 
+ +# Settings # -------- + +# Neutron Network Configuration +# ----------------------------- + +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) -NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas + +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi + +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} + +NEUTRON_UWSGI=neutron.wsgi.api:application +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" # and "enforce_new_defaults" to True in the Neutron's config to enforce usage -# of the new RBAC policies and scopes. -NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) + +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. 
Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} + +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many of devstack plugins. Do not change. 
+_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron + +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} + +# List of config file names in addition to the main plugin config file +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. +declare -a -g Q_PLUGIN_EXTRA_CONF_FILES + +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. 
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi + -NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) # Distributed Virtual Router (DVR) configuration # Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR # - ``dvr_no_external`` - Compute node in multi-node DVR, no external network # -# Default is 'dvr_snat' since it can handle both DVR and legacy routers -NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat} - -NEUTRON_BIN_DIR=$(get_python_exec_prefix) -NEUTRON_DHCP_BINARY="neutron-dhcp-agent" +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population +fi -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini -NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 plugins' allocation +# of project networks and availability of provider networks. If these +# are not configured in ``localrc``, project networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. + +# To disable tunnels (GRE or VXLAN) for project networks, +# set to False in ``local.conf``. 
+# GRE tunnels are only supported by the openvswitch. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} + +# If using GRE, VXLAN or GENEVE tunnels for project networks, +# specify the range of IDs from which project networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + +# To use VLANs for project networks, set to True in localrc. VLANs +# are supported by the ML2 plugins, requiring additional configuration +# described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for project networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which project networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. +# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} -NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini -NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini -NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ -NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} +# If using VLANs for project networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent, as described below. +# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} + +# With the openvswitch agent, if using VLANs for project networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. 
+# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use DHCP agent for providing metadata service in the case of +# without L3 agent (No Route Agent), set to True in localrc. +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as dhcp option, so the request to 169.254.169.254 +# will be able to reach through a route(DHCP agent) +# This option require ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} +# Neutron plugin specific functions +# --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +fi -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini +# Agent metering service plugin functions +# ------------------------------------------- -# By default, use the ML2 plugin -NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} -NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini} -NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN -NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering -NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini} -NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 
-NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} -NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} -NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} -NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent} +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments +source $TOP_DIR/lib/neutron_plugins/services/ovn-bgp -# Public facing bits -if is_service_enabled tls-proxy; then - NEUTRON_SERVICE_PROTOCOL="https" +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False fi -NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST} -NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696} -NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696} -NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone} -NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) -NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" -NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" - -# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create -# an external network bridge -PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} -# Network type - default vxlan, however enables vlan based jobs to override -# using the legacy environment variable as well as a new variable in greater -# alignment with the naming scheme of this plugin. 
-NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan} - -NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}} - -# Physical network for VLAN network usage. -NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initialize PUBLIC_BRIDGE. +OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} +# Save trace setting +_XTRACE_NEUTRON=$(set +o | grep xtrace) +set +o xtrace -# Additional neutron api config files -declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS # Functions # --------- @@ -130,303 +310,200 @@ function is_neutron_enabled { } # Test if any Neutron services are enabled -# is_neutron_enabled +# TODO(slaweq): this is not really needed now and we should remove it as soon +# as it will not be called from any other Devstack plugins, like e.g. Neutron +# plugin function is_neutron_legacy_enabled { - # first we need to remove all "neutron-" from DISABLED_SERVICES list - disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g') - [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1 - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 - return 1 + return 0 } -if is_neutron_legacy_enabled; then - source $TOP_DIR/lib/neutron-legacy -fi - -# cleanup_neutron() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
- source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi fi - - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." fi - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) done -} -# configure_root_helper_options() - Configure agent rootwrap helper options -function configure_root_helper_options { - local conffile=$1 - iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD" - iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD" + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" } -# configure_neutron() - Set config files, create data dirs, etc -function configure_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
- sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR - - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - configure_neutron_rootwrap - - mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} - # NOTE(yamamoto): A decomposed plugin should prepare the config file in - # its devstack plugin. - if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" fi +} - iniset $NEUTRON_CONF database connection `database_connection_url neutron` - iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." + fi + echo "$opts" +} +# configure_neutron() +# Set common config for all neutron server and agents. 
+function configure_neutron { + _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF - # Neutron API server & Neutron plugin - if is_service_enabled neutron-api; then - local policy_file=$NEUTRON_CONF_DIR/policy.json - # Allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. - if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $policy_file - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file - fi - - cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - - iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN - - iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True - iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING - - iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF neutron - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configure tenant network type - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE - - local mech_drivers="openvswitch" - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - mech_drivers+=",l2population" - else - mech_drivers+=",linuxbridge" - fi - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers - - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME - if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE} - fi - if [[ "$NEUTRON_PORT_SECURITY" = 
"True" ]]; then - neutron_ml2_extension_driver_add port_security - fi - configure_rbac_policies - fi - - # Neutron OVS or LB agent - if is_service_enabled neutron-agent; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF - - # Configure the neutron agent - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables - iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP - elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch - iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP - - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True - iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True - iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True - fi - fi - - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi + if is_service_enabled q-metering neutron-metering; then + _configure_neutron_metering fi - - # DHCP Agent - if is_service_enabled neutron-dhcp; then - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF - - iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True - - configure_root_helper_options $NEUTRON_DHCP_CONF - iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF + if is_service_enabled q-agt neutron-agent; then + _configure_neutron_plugin_agent + fi + if is_service_enabled q-dhcp neutron-dhcp; then + _configure_neutron_dhcp_agent + fi + if is_service_enabled q-l3 neutron-l3; then + _configure_neutron_l3_agent + fi + if is_service_enabled q-meta neutron-metadata-agent; then + _configure_neutron_metadata_agent fi - if is_service_enabled neutron-l3; then - cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_service_plugin_class_add router - configure_root_helper_options $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi - # Configure the neutron agent to serve external network ports - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - else - iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - fi + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin + fi - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset 
$NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos fi fi - - # Metadata - if is_service_enabled neutron-metadata-agent; then - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF - - iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST - iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS - # TODO(ihrachys) do we really need to set rootwrap for metadata agent? - configure_root_helper_options $NEUTRON_META_CONF - - # TODO(dtroyer): remove the v2.0 hard code below - iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension fi - - # Format logging - setup_logging $NEUTRON_CONF - - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True + if is_service_enabled q-ovn-bgp; then + configure_ovn_bgp_service_plugin fi - # Metering - if is_service_enabled neutron-metering; then - cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF - neutron_service_plugin_class_add metering + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + 
_configure_neutron_service fi + + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 + + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" } -# configure_neutron_rootwrap() - configure Neutron's rootwrap -function configure_neutron_rootwrap { - # Deploy new rootwrap filters files (owned by root). - # Wipe any existing rootwrap.d files first - if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done fi +} - # Deploy filters to /etc/neutron/rootwrap.d - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf - - # Set up the rootwrap sudoers for Neutron - tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap -} - -# Make Neutron-required changes to nova.conf -# Takes a single optional argument which is the config file to update, -# if not passed $NOVA_CONF is used. 
-function configure_neutron_nova_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" +function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} - iniset $conf neutron auth_type "password" - iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username neutron - iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "Default" - iniset $conf neutron project_name "$SERVICE_TENANT_NAME" - iniset $conf neutron project_domain_name "Default" - iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY + configure_keystoneauth $conf nova neutron iniset $conf neutron region_name "$REGION_NAME" # optionally set options in nova_conf neutron_plugin_create_nova_conf $conf - if is_service_enabled neutron-metadata-agent; then + if is_service_enabled q-meta neutron-metadata-agent; then iniset $conf neutron service_metadata_proxy "True" fi + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } +# create_neutron_accounts() - Set up common required neutron accounts + # Tenant User Roles # ------------------------------------------------------------------ # service neutron admin # if enabled -# create_neutron_accounts() - Create required service accounts -function create_neutron_accounts_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" +# Migrated from keystone_data.sh +function create_neutron_accounts { local neutron_url - - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/ - else - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ + if [ ! 
-z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi - - if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then + if is_service_enabled q-svc neutron-api; then create_service_user "neutron" - neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ "$REGION_NAME" "$neutron_url" fi } # init_neutron() - Initialize databases, etc. -function init_neutron_new { - - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - recreate_database neutron - +function init_neutron { + recreate_database $Q_DB_NAME time_start "dbsync" # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head time_stop "dbsync" } # install_neutron() - Collect source and prepare -function install_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - +function install_neutron { # Install neutron-lib from git so we make sure we're testing # the latest code. 
if use_library_from_git "neutron-lib"; then @@ -434,17 +511,23 @@ function install_neutron_new { setup_dev_lib "neutron-lib" fi - # L3 service requires radvd - if is_service_enabled neutron-l3; then - install_package radvd + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR fi - - if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then - #TODO(sc68cal) - kind of ugly - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - neutron_plugin_install_agent_packages + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR + + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn + fi } # install_neutronclient() - Collect source and prepare @@ -452,309 +535,616 @@ function install_neutronclient { if use_library_from_git "python-neutronclient"; then git_clone_by_name "python-neutronclient" setup_dev_lib "python-neutronclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion fi } -# start_neutron_api() - Start the API process ahead of other things -function start_neutron_api { - local service_port=$NEUTRON_SERVICE_PORT - local service_protocol=$NEUTRON_SERVICE_PROTOCOL - local neutron_url - if is_service_enabled tls-proxy; then - service_port=$NEUTRON_SERVICE_PORT_INT - service_protocol="http" +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. 
+ if is_service_enabled q-l3 neutron-l3; then + install_package radvd fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then + neutron_plugin_install_agent_packages + fi +} - local opts="" - opts+=" --config-file $NEUTRON_CONF" - opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" - local cfg_file - for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + configure_rbac_policies +} - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/ - enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True else - # Start the Neutron service - # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT - fi + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False fi +} - if ! 
wait_for_service $SERVICE_TIMEOUT $neutron_url; then - die $LINENO "neutron-api did not start" +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to setup a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. + init_ovn + start_ovn + fi + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi fi } -# start_neutron() - Start running processes -function start_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - # Start up the neutron agents if enabled - # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins - # can resolve the $NEUTRON_AGENT_BINARY - if is_service_enabled neutron-agent; then - # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files - run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" - fi - if is_service_enabled neutron-dhcp; then - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF - run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF" - fi - if is_service_enabled neutron-l3; then - run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF" +# Enable neutron server services based on configuration +# This function determines which neutron services should be enabled +# and adds them 
to ENABLED_SERVICES. It reads the neutron configuration +# to determine if RPC workers should be enabled. +# This must be called after configure_neutron has created the config files. +function enable_neutron_server_services { + local rpc_workers + + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + if [[ -f $NEUTRON_CONF ]]; then + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + else + # If config doesn't exist yet, assume default behavior (enable rpc workers) + rpc_workers="" fi - if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network - fi + + # Always enable these core services + enable_service neutron-api + enable_service neutron-periodic-workers + + # Conditionally enable RPC server based on configuration + if [[ "$rpc_workers" != "0" ]]; then + enable_service neutron-rpc-server fi - if is_service_enabled neutron-metadata-agent; then - run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF" + + # Enable OVN maintenance worker if using OVN + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker fi +} + +# Start running processes +function start_neutron_service_and_check { + local cfg_file_options + local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" + + # Enable neutron server services based on configuration + 
enable_neutron_server_services - if is_service_enabled neutron-metering; then - run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" + # Start the Neutron service processes + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + + # Start RPC server if enabled (run_process checks is_service_enabled internally) + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi + echo "Waiting for Neutron to start..." + + local testcmd="wget --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT } -# stop_neutron() - Stop running processes -function stop_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
- for serv in neutron-api neutron-agent neutron-l3; do - stop_process $serv - done +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} - if is_service_enabled neutron-rpc-server; then - stop_process neutron-rpc-server +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi +} + +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" +} + +# Start running processes, including screen +function start_neutron_agents { + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" +} + +function stop_l2_agent { + stop_process q-agt +} - if is_service_enabled neutron-dhcp; then - stop_process neutron-dhcp +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp pid=$(ps aux | awk 
'/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi - if is_service_enabled neutron-metadata-agent; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process neutron-metadata-agent + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance + + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 fi -} -# neutron_service_plugin_class_add() - add service plugin class -function neutron_service_plugin_class_add_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - local service_plugin_class=$1 - local plugins="" + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta + fi - plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) - if [ $plugins ]; then - plugins+="," + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop + fi + + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || : fi - plugins+="${service_plugin_class}" - iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } -function _neutron_ml2_extension_driver_add { - local driver=$1 - local drivers="" +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + stop_other + stop_l2_agent - drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers) - if [ $drivers ]; then - drivers+="," + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn fi - drivers+="${driver}" - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers } -function neutron_server_config_add_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
- _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. + + local IP_REPLACE="" + local IP_DEL="" + local IP_UP="" + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") + local ADD_OVS_PORT="" + local DEL_OVS_PORT="" + local ARP_CMD="" + + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" + fi + + if [[ "$del_ovs_port" == "True" ]]; then + DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" + fi + + if [[ "$IP_BRD" != "" ]]; then + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" + if [[ "$af" == "inet" ]]; then + IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " + fi + fi + + # The add/del OVS port calls have to happen either before or + # after the address is moved in order to not leave it orphaned. 
+ $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD + fi } -# neutron_deploy_rootwrap_filters() - deploy rootwrap filters -function neutron_deploy_rootwrap_filters_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - local srcdir=$1 - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. + if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi } -# Dispatch functions -# These are needed for compatibility between the old and new implementations -# where there are function name overlaps. These will be removed when -# neutron-legacy is removed. -# TODO(sc68cal) Remove when neutron-legacy is no more. 
+# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up function cleanup_neutron { - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-api - stop_process neutron-rpc-server - remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" - sudo rm -f $(apache_site_config_for neutron-api) + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) + + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi + + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi - if is_neutron_legacy_enabled; then - # Call back to old function - cleanup_mutnauq "$@" - else - cleanup_neutron_new "$@" + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup + fi + + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn fi } -function configure_neutron { - if 
is_neutron_legacy_enabled; then - # Call back to old function - configure_mutnauq "$@" + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR +} + +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. +function _configure_neutron_common { + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova works correctly with neutron. + if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE else - configure_neutron_new "$@" + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE fi - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. 
exiting" fi -} -# configure_rbac_policies() - Configure Neutron to enforce new RBAC -# policies and scopes if NEUTRON_ENFORCE_SCOPE == True -function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True - iniset $NEUTRON_CONF oslo_policy enforce_scope True - else - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False - iniset $NEUTRON_CONF oslo_policy enforce_scope False + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository, + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. 
+ if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE fi + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + + # Format logging + setup_logging $NEUTRON_CONF + + _neutron_setup_rootwrap } +function _configure_neutron_dhcp_agent { -function configure_neutron_nova { - if is_neutron_legacy_enabled; then - # Call back to old function - create_nova_conf_neutron $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - create_nova_conf_neutron $conf - done - fi - else - configure_neutron_nova_new $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - configure_neutron_nova_new $conf - done + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if ! 
is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi fi fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } -function create_neutron_accounts { - if is_neutron_legacy_enabled; then - # Call back to old function - create_mutnauq_accounts "$@" - else - create_neutron_accounts_new "$@" - fi + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE } -function init_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - init_mutnauq "$@" - else - init_neutron_new "$@" - fi +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 } -function install_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - install_mutnauq "$@" - else - install_neutron_new "$@" - fi +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is enabled. 
+function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + configure_root_helper_options /$Q_PLUGIN_CONF_FILE + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent } +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. +function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystoneauth $NEUTRON_CONF nova nova + + # Configuration for placement client + configure_keystoneauth $NEUTRON_CONF placement placement + + # Configure plugin + neutron_plugin_configure_service +} + +# Utility Functions +#------------------ + +# neutron_service_plugin_class_add() - add service plugin class function neutron_service_plugin_class_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_service_plugin_class_add "$@" - else - neutron_service_plugin_class_add_new "$@" + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! 
,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" fi } +# neutron_ml2_extension_driver_add() - add ML2 extension driver function neutron_ml2_extension_driver_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_ml2_extension_driver_add_old "$@" - else - _neutron_ml2_extension_driver_add "$@" + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" fi } -function install_neutron_agent_packages { - if is_neutron_legacy_enabled; then - # Call back to old function - install_neutron_agent_packages_mutnauq "$@" - else - : - fi +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) } -function neutron_server_config_add { - if is_neutron_legacy_enabled; then - # Call back to old function - mutnauq_server_config_add "$@" - else - neutron_server_config_add_new "$@" +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
+function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ } -function start_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - start_mutnauq_l2_agent "$@" - start_mutnauq_other_agents "$@" - else - start_neutron_new "$@" +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D fi + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE + + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + configure_root_helper_options $NEUTRON_CONF } -function stop_neutron { - if 
is_neutron_legacy_enabled; then - # Call back to old function - stop_mutnauq "$@" - else - stop_neutron_new "$@" +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi } -function neutron_deploy_rootwrap_filters { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_deploy_rootwrap_filters "$@" - else - neutron_deploy_rootwrap_filters_new "$@" +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. + iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $1 +} +# Functions for Neutron Exercises +#-------------------------------- + +# ssh check +function _ssh_check_neutron { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd = "" + probe_cmd=`_get_probe_cmd_prefix $from_net` + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec +} + +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! 
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi } # Restore xtrace -$XTRACE +$_XTRACE_NEUTRON + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 253b457ae1..e90400fec1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1,1075 +1,6 @@ #!/bin/bash -# -# lib/neutron -# functions - functions specific to neutron -# Dependencies: -# ``functions`` file -# ``DEST`` must be defined -# ``STACK_USER`` must be defined +# TODO(slaweq): remove that file when other projects, like e.g. Grenade will +# be using lib/neutron -# ``stack.sh`` calls the entry points in this order: -# -# - install_neutron_agent_packages -# - install_neutronclient -# - install_neutron -# - install_neutron_third_party -# - configure_neutron -# - init_neutron -# - configure_neutron_third_party -# - init_neutron_third_party -# - start_neutron_third_party -# - create_nova_conf_neutron -# - configure_neutron_after_post_config -# - start_neutron_service_and_check -# - check_neutron_third_party_integration -# - start_neutron_agents -# - create_neutron_initial_network -# -# ``unstack.sh`` calls the entry points in this order: -# -# - stop_neutron -# - stop_neutron_third_party -# - cleanup_neutron - -# Functions in lib/neutron are classified into the following categories: -# -# - entry points (called from stack.sh or unstack.sh) -# - internal functions -# - neutron exercises -# - 3rd party programs - - -# Neutron Networking -# ------------------ - -# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want -# to run Neutron on this host, make sure that q-svc is also in -# ``ENABLED_SERVICES``. -# -# See "Neutron Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Neutron. 
- -# Settings -# -------- - - -# Neutron Network Configuration -# ----------------------------- - -if is_service_enabled tls-proxy; then - Q_PROTOCOL="https" -fi - - -# Set up default directories -GITDIR["python-neutronclient"]=$DEST/python-neutronclient - - -NEUTRON_DIR=$DEST/neutron -NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas - -# Support entry points installation of console scripts -if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then - NEUTRON_BIN_DIR=$NEUTRON_DIR/bin -else - NEUTRON_BIN_DIR=$(get_python_exec_prefix) -fi - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} - -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) - -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini - -# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" -# and "enforce_new_defaults" to True in the Neutron's config to enforce usage -# of the new RBAC policies and scopes. -NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) - -# Agent binaries. Note, binary paths for other agents are set in per-service -# scripts in lib/neutron_plugins/services/ -AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" -AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} -AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - -# Agent config files. 
Note, plugin-specific Q_PLUGIN_CONF_FILE is set and -# loaded from per-plugin scripts in lib/neutron_plugins/ -Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini -Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - -# Default name for Neutron database -Q_DB_NAME=${Q_DB_NAME:-neutron} -# Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-ml2} -# Default Neutron Port -Q_PORT=${Q_PORT:-9696} -# Default Neutron Internal Port when using TLS proxy -Q_PORT_INT=${Q_PORT_INT:-19696} -# Default Neutron Host -Q_HOST=${Q_HOST:-$SERVICE_HOST} -# Default protocol -Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} -# Default listen address -Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# RHEL's support for namespaces requires using veths with ovs -Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} -# Allow Overlapping IP among subnets -Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} -Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} -VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} - -# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. -# /etc/neutron is assumed by many of devstack plugins. Do not change. -_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron - -# List of config file names in addition to the main plugin config file -# To add additional plugin config files, use ``neutron_server_config_add`` -# utility function. 
For example: -# -# ``neutron_server_config_add file1`` -# -# These config files are relative to ``/etc/neutron``. The above -# example would specify ``--config-file /etc/neutron/file1`` for -# neutron server. -declare -a -g Q_PLUGIN_EXTRA_CONF_FILES - -# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. -declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS - - -Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" -else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - fi -fi - - -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# -Q_DVR_MODE=${Q_DVR_MODE:-legacy} -if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population -fi - -# Provider Network Configurations -# -------------------------------- - -# The following variables control the Neutron ML2 plugins' allocation -# of tenant networks and availability of provider networks. If these -# are not configured in ``localrc``, tenant networks will be local to -# the host (with no remote connectivity), and no physical resources -# will be available for the allocation of provider networks. - -# To disable tunnels (GRE or VXLAN) for tenant networks, -# set to False in ``local.conf``. -# GRE tunnels are only supported by the openvswitch. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} - -# If using GRE, VXLAN or GENEVE tunnels for tenant networks, -# specify the range of IDs from which tenant networks are -# allocated. Can be overridden in ``localrc`` if necessary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} - -# To use VLANs for tenant networks, set to True in localrc. 
VLANs -# are supported by the ML2 plugins, requiring additional configuration -# described below. -ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - -# If using VLANs for tenant networks, set in ``localrc`` to specify -# the range of VLAN VIDs from which tenant networks are -# allocated. An external network switch must be configured to -# trunk these VLANs between hosts for multi-host connectivity. -# -# Example: ``TENANT_VLAN_RANGE=1000:1999`` -TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - -# If using VLANs for tenant networks, or if using flat or VLAN -# provider networks, set in ``localrc`` to the name of the physical -# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. -# -# Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} - -# With the openvswitch agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the OVS bridge to use for the physical network. The -# bridge will be created if it does not already exist, but a -# physical interface must be manually added to the bridge as a -# port for external connectivity. -# -# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} - -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. 
-# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') - die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" - LB_PHYSICAL_INTERFACE=$default_route_dev -fi - -# When Neutron tunnels are enabled it is needed to specify the -# IP address of the end point in the local server. This IP is set -# by default to the same IP address that the HOST IP. -# This variable can be used to specify a different end point IP address -# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` -TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} - -# With the openvswitch plugin, set to True in ``localrc`` to enable -# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. -# -# Example: ``OVS_ENABLE_TUNNELING=True`` -OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - -# Use DHCP agent for providing metadata service in the case of -# without L3 agent (No Route Agent), set to True in localrc. -ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} - -# Add a static route as dhcp option, so the request to 169.254.169.254 -# will be able to reach through a route(DHCP agent) -# This option require ENABLE_ISOLATED_METADATA = True -ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} -# Neutron plugin specific functions -# --------------------------------- - -# Please refer to ``lib/neutron_plugins/README.md`` for details. 
-if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then - source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN -fi - -# Agent metering service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/metering - -# L3 Service functions -source $TOP_DIR/lib/neutron_plugins/services/l3 - -# Additional Neutron service plugins -source $TOP_DIR/lib/neutron_plugins/services/placement -source $TOP_DIR/lib/neutron_plugins/services/trunk -source $TOP_DIR/lib/neutron_plugins/services/qos - -# Use security group or not -if has_neutron_plugin_security_group; then - Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} -else - Q_USE_SECGROUP=False -fi - -# Save trace setting -_XTRACE_NEUTRON=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -function _determine_config_server { - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - else - die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - fi - fi - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." - fi - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) - done - - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done - echo "$opts" -} - -function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - echo "$opts" -} - -# For services and agents that require it, dynamically construct a list of -# --config-file arguments that are passed to the binary. 
-function determine_config_files { - local opts="" - case "$1" in - "neutron-server") opts="$(_determine_config_server)" ;; - "neutron-l3-agent") opts="$(_determine_config_l3)" ;; - esac - if [ -z "$opts" ] ; then - die $LINENO "Could not determine config files for $1." - fi - echo "$opts" -} - -# configure_mutnauq() -# Set common config for all neutron server and agents. -function configure_mutnauq { - _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF - - if is_service_enabled q-metering; then - _configure_neutron_metering - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then - _configure_neutron_plugin_agent - fi - if is_service_enabled q-dhcp; then - _configure_neutron_dhcp_agent - fi - if is_service_enabled q-l3; then - _configure_neutron_l3_agent - fi - if is_service_enabled q-meta; then - _configure_neutron_metadata_agent - fi - - if [[ "$Q_DVR_MODE" != "legacy" ]]; then - _configure_dvr - fi - if is_service_enabled ceilometer; then - _configure_neutron_ceilometer_notifications - fi - - if [[ $Q_AGENT == "ovn" ]]; then - configure_ovn - configure_ovn_plugin - fi - - # Configure Neutron's advanced services - if is_service_enabled q-placement neutron-placement; then - configure_placement_extension - fi - if is_service_enabled q-trunk neutron-trunk; then - configure_trunk_extension - fi - if is_service_enabled q-qos neutron-qos; then - configure_qos - if is_service_enabled q-l3 neutron-l3; then - configure_l3_agent_extension_fip_qos - configure_l3_agent_extension_gateway_ip_qos - fi - fi - - iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" - # devstack is not a tool for running uber scale OpenStack - # clouds, therefore running without a dedicated RPC worker - # for state reports is more than adequate. 
- iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 -} - -function create_nova_conf_neutron { - local conf=${1:-$NOVA_CONF} - iniset $conf neutron auth_type "password" - iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username "$Q_ADMIN_USERNAME" - iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" - iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $conf neutron region_name "$REGION_NAME" - - # optionally set options in nova_conf - neutron_plugin_create_nova_conf $conf - - if is_service_enabled q-meta; then - iniset $conf neutron service_metadata_proxy "True" - fi - - iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" -} - -# create_mutnauq_accounts() - Set up common required neutron accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service neutron admin # if enabled - -# Migrated from keystone_data.sh -function create_mutnauq_accounts { - local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/ - else - neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ - fi - - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - - create_service_user "neutron" - - get_or_create_service "neutron" "network" "Neutron Service" - get_or_create_endpoint \ - "network" \ - "$REGION_NAME" "$neutron_url" - fi -} - -# init_mutnauq() - Initialize databases, etc. 
-function init_mutnauq { - recreate_database $Q_DB_NAME - time_start "dbsync" - # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - time_stop "dbsync" -} - -# install_mutnauq() - Collect source and prepare -function install_mutnauq { - # Install neutron-lib from git so we make sure we're testing - # the latest code. - if use_library_from_git "neutron-lib"; then - git_clone_by_name "neutron-lib" - setup_dev_lib "neutron-lib" - fi - - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - - if [[ $Q_AGENT == "ovn" ]]; then - install_ovn - fi -} - -# install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages_mutnauq { - # radvd doesn't come with the OS. Install it if the l3 service is enabled. - if is_service_enabled q-l3; then - install_package radvd - fi - # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then - neutron_plugin_install_agent_packages - fi -} - -# Finish neutron configuration -function configure_neutron_after_post_config { - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - configure_rbac_policies -} - -# configure_rbac_policies() - Configure Neutron to enforce new RBAC -# policies and scopes if NEUTRON_ENFORCE_SCOPE == True -function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True - iniset $NEUTRON_CONF oslo_policy enforce_scope True - else - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False - iniset $NEUTRON_CONF oslo_policy enforce_scope False - fi -} - -# Start running OVN processes -function start_ovn_services { - if [[ $Q_AGENT == "ovn" ]]; then - init_ovn - start_ovn - if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then - if [[ 
"$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then - echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " - echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" - else - create_public_bridge - fi - fi - fi -} - -# Start running processes -function start_neutron_service_and_check { - local service_port=$Q_PORT - local service_protocol=$Q_PROTOCOL - local cfg_file_options - local neutron_url - - cfg_file_options="$(determine_config_files neutron-server)" - - if is_service_enabled tls-proxy; then - service_port=$Q_PORT_INT - service_protocol="http" - fi - # Start the Neutron service - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - enable_service neutron-api - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/networking/ - enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" - else - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi - fi - echo "Waiting for Neutron to start..." 
- - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT -} - -# Control of the l2 agent is separated out to make it easier to test partial -# upgrades (everything upgraded except the L2 agent) -function start_mutnauq_l2_agent { - run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - - if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - if is_ironic_hardware; then - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi -} - -function start_mutnauq_other_agents { - run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - - run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" - run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" -} - -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled - start_mutnauq_l2_agent - start_mutnauq_other_agents -} - -function stop_mutnauq_l2_agent { - stop_process q-agt -} - -# stop_mutnauq_other() - Stop running processes -function stop_mutnauq_other { - if is_service_enabled q-dhcp; then - stop_process q-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! 
-z "$pid" ] && sudo kill -9 $pid - fi - - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-rpc-server - stop_process neutron-api - else - stop_process q-svc - fi - - if is_service_enabled q-l3; then - sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" - stop_process q-l3 - fi - - if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process q-meta - fi - - if is_service_enabled q-metering; then - neutron_metering_stop - fi - - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : - fi -} - -# stop_neutron() - Stop running processes (non-screen) -function stop_mutnauq { - stop_mutnauq_other - stop_mutnauq_l2_agent - - if [[ $Q_AGENT == "ovn" ]]; then - stop_ovn - fi -} - -# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup. If no IP is -# configured on the interface, just add it as a port to the OVS bridge. -function _move_neutron_addresses_route { - local from_intf=$1 - local to_intf=$2 - local add_ovs_port=$3 - local del_ovs_port=$4 - local af=$5 - - if [[ -n "$from_intf" && -n "$to_intf" ]]; then - # Remove the primary IP address from $from_intf and add it to $to_intf, - # along with the default route, if it exists. Also, when called - # on configure we will also add $from_intf as a port on $to_intf, - # assuming it is an OVS bridge. 
- - local IP_REPLACE="" - local IP_DEL="" - local IP_UP="" - local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") - local ADD_OVS_PORT="" - local DEL_OVS_PORT="" - local ARP_CMD="" - - IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') - - if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" - fi - - if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" - fi - - if [[ "$del_ovs_port" == "True" ]]; then - DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" - fi - - if [[ "$IP_BRD" != "" ]]; then - IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" - IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" - IP_UP="sudo ip link set $to_intf up" - if [[ "$af" == "inet" ]]; then - IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) - ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " - fi - fi - - # The add/del OVS port calls have to happen either before or - # after the address is moved in order to not leave it orphaned. - $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD - fi -} - -# _configure_public_network_connectivity() - Configures connectivity to the -# external network using $PUBLIC_INTERFACE or NAT on the single interface -# machines -function _configure_public_network_connectivity { - # If we've given a PUBLIC_INTERFACE to take over, then we assume - # that we can own the whole thing, and privot it into the OVS - # bridge. If we are not, we're probably on a single interface - # machine, and we just setup NAT so that fixed guests can get out. 
- if [[ -n "$PUBLIC_INTERFACE" ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" - fi - else - for d in $default_v4_route_devs; do - sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE - done - fi -} - -# cleanup_mutnauq() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_mutnauq { - - if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" - - if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - # ip(8) wants the prefix length when deleting - local v6_gateway - v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') - sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" - fi - - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi - - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup - fi - - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} - done - - if [[ $Q_AGENT == "ovn" ]]; then - cleanup_ovn - fi -} - - -function _create_neutron_conf_dir { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - 
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR -} - -# _configure_neutron_common() -# Set common config for all neutron server and agents. -# This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common { - _create_neutron_conf_dir - - # Uses oslo config generator to generate core sample configuration files - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - - # allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. - if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE - fi - - # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. - # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - neutron_plugin_configure_common - - if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then - die $LINENO "Neutron plugin not set.. exiting" - fi - - # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - # NOTE(hichihara): Some neutron vendor plugins were already decomposed and - # there is no config file in Neutron tree. They should prepare the file in each plugin. 
- if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then - cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE - elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - fi - - iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` - iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS - iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock - - # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - if [ "$VIRT_DRIVER" = 'fake' ]; then - # Disable arbitrary limits - iniset $NEUTRON_CONF quotas quota_network -1 - iniset $NEUTRON_CONF quotas quota_subnet -1 - iniset $NEUTRON_CONF quotas quota_port -1 - iniset $NEUTRON_CONF quotas quota_security_group -1 - iniset $NEUTRON_CONF quotas quota_security_group_rule -1 - fi - - # Format logging - setup_logging $NEUTRON_CONF - - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - - _neutron_setup_rootwrap -} - -function _configure_neutron_dhcp_agent { - - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE - - iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True - iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - if ! 
is_service_enabled q-l3; then - if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK - else - if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then - die "$LINENO" "Enable isolated metadata is a must for metadata network" - fi - fi - fi - - _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - - neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE -} - - -function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE - - iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS - iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 -} - -function _configure_neutron_metering { - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent -} - -function _configure_dvr { - iniset $NEUTRON_CONF DEFAULT router_distributed True - iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE -} - - -# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent -# It is called when q-agt is enabled. 
-function _configure_neutron_plugin_agent { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - # Configure agent for plugin - neutron_plugin_configure_plugin_agent -} - -# _configure_neutron_service() - Set config files for neutron service -# It is called when q-svc is enabled. -function _configure_neutron_service { - Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - - # Update either configuration file with plugin - iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP - - iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME - - # Configuration for neutron notifications to nova. - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configure plugin - neutron_plugin_configure_service -} - -# Utility Functions -#------------------ - -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { - local service_plugin_class=$1 - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class - elif [[ ! 
,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" - fi -} - -# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver -function _neutron_ml2_extension_driver_add_old { - local extension=$1 - if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS=$extension - elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" - fi -} - -# mutnauq_server_config_add() - add server config file -function mutnauq_server_config_add { - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) -} - -# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). -function _neutron_deploy_rootwrap_filters { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - local srcdir=$1 - sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D - sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -} - -# _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - # Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d - if [[ -d $Q_CONF_ROOTWRAP_D ]]; then - sudo rm -rf $Q_CONF_ROOTWRAP_D - fi - - _neutron_deploy_rootwrap_filters $NEUTRON_DIR - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - # location moved in newer versions, prefer new location - if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE - else - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE - - # Specify 
``rootwrap.conf`` as first parameter to neutron-rootwrap - ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - - # Set up the rootwrap sudoers for neutron - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap - - # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _neutron_setup_interface_driver { - - # ovs_use_veth needs to be set before the plugin configuration - # occurs to allow plugins to override the setting. - iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $1 -} -# Functions for Neutron Exercises -#-------------------------------- - -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -# ssh check -function _ssh_check_neutron { - local 
from_net=$1 - local key_file=$2 - local ip=$3 - local user=$4 - local timeout_sec=$5 - local probe_cmd = "" - probe_cmd=`_get_probe_cmd_prefix $from_net` - local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" - test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec -} - -function plugin_agent_add_l2_agent_extension { - local l2_agent_extension=$1 - if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then - L2_AGENT_EXTENSIONS=$l2_agent_extension - elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then - L2_AGENT_EXTENSIONS+=",$l2_agent_extension" - fi -} - -# Restore xtrace -$_XTRACE_NEUTRON - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index ed40886fda..728aaee85f 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -13,7 +13,7 @@ Plugin specific configuration variables should be in this file. 
functions --------- -``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : optionally set options in nova_conf diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index d3f5bd5752..84ca7ec42c 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -67,7 +67,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 310b72e5ad..96400634af 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -72,7 +72,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent deleted file mode 100644 index bdeaf0f3c6..0000000000 --- a/lib/neutron_plugins/linuxbridge_agent +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# -# Neutron Linux Bridge L2 agent -# ----------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - -function neutron_lb_cleanup { - sudo ip link delete $PUBLIC_BRIDGE - - bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/` - if [[ -z "$bridge_list" ]]; then - return - fi - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e 
vxlan-[0-9a-f\-]*); do - sudo ip link delete $port - done - elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do - sudo ip link delete $port - done - fi - for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do - sudo ip link delete $bridge - done -} - -function is_neutron_ovs_base_plugin { - # linuxbridge doesn't use OVS - return 1 -} - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_install_agent_packages { - : -} - -function neutron_plugin_configure_dhcp_agent { - local conf_file=$1 - : -} - -function neutron_plugin_configure_l3_agent { - local conf_file=$1 - sudo ip link add $PUBLIC_BRIDGE type bridge - set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU -} - -function neutron_plugin_configure_plugin_agent { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" - fi - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" - iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES - - # Configure vxlan tunneling - if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True" - iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver linuxbridge -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index f00feac6b4..71bede842e 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -67,21 +67,21 @@ function neutron_plugin_configure_common { Q_PLUGIN_CLASS="ml2" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. - _neutron_service_plugin_class_add $ML2_L3_PLUGIN + neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service { if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "local" ]]; then - Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) + Q_SRV_EXTRA_OPTS+=(project_network_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then # This assumes you want a simple configuration, and will overwrite # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS. 
- Q_SRV_EXTRA_OPTS+=(tenant_network_types=gre) + Q_SRV_EXTRA_OPTS+=(project_network_types=gre) Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES) elif [[ "$ENABLE_TENANT_VLANS" == "True" ]]; then - Q_SRV_EXTRA_OPTS+=(tenant_network_types=vlan) + Q_SRV_EXTRA_OPTS+=(project_network_types=vlan) else - echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts." + echo "WARNING - The ml2 plugin is using local project networks, with no connectivity between hosts." fi # Allow for overrding VLAN configuration (for example, to configure provider @@ -111,20 +111,10 @@ function neutron_plugin_configure_service { fi fi fi - # REVISIT(rkukura): Setting firewall_driver here for - # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is - # used in the server, in case no L2 agent is configured on the - # server's node. If an L2 agent is configured, this will get - # overridden with the correct driver. The ml2 plugin should - # instead use its own config variable to indicate whether security - # groups is enabled, and that will need to be set here instead. 
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch index 130eaacab3..c661a1a600 100644 --- a/lib/neutron_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -20,12 +20,12 @@ function neutron_plugin_configure_common { function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE ovs project_network_type gre iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES elif [[ "$ENABLE_TENANT_VLANS" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type vlan + iniset /$Q_PLUGIN_CONF_FILE ovs project_network_type vlan else - echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + echo "WARNING - The openvswitch plugin is using local project networks, with no connectivity between hosts." 
fi # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 7fed8bf853..6e79984e9b 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -68,7 +68,7 @@ function neutron_plugin_setup_interface_driver { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 9022f2d382..3f1d6d8b6b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -28,7 +28,7 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') OVN_REPO_NAME=${OVN_REPO_NAME:-ovn} -OVN_BRANCH=${OVN_BRANCH:-v20.06.1} +OVN_BRANCH=${OVN_BRANCH:-branch-24.03} # The commit removing OVN bits from the OVS tree, it is the commit that is not # present in OVN tree and is used to distinguish if OVN is part of OVS or not. # https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d @@ -91,16 +91,36 @@ OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} # http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} +# OVN metadata agent configuration OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# OVN agent configuration +# The OVN agent is configured, by default, with the "metadata" extension. 
+OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} +# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata +# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN +# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling +# the first one (OVN Metadata agent) and enabling the second (OVN agent). +# This variable will be removed in 2026.2, along with the OVN Metadata agent +# removal. +TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT) + # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) +# The variable NEUTRON_BGP_PEER_BRIDGES only applies when OVN_AGENT_EXTENSIONS +# includes "ovn-bgp". When the "ovn-bgp" extension is configured, it should be +# set to some non-empty value. +NEUTRON_BGP_PEER_BRIDGES=${NEUTRON_BGP_PEER_BRIDGES:-} + export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] fi OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) @@ -130,6 +150,7 @@ OVN_RUNDIR=$OVS_PREFIX/var/run/ovn NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" STACK_GROUP="$( id --group --name "$STACK_USER" )" @@ -153,8 +174,10 @@ fi # Defaults Overwrite # ------------------ - -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). 
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} @@ -174,7 +197,7 @@ function wait_for_db_file { while [ ! -f $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "DB File $1 not found" fi done @@ -185,7 +208,7 @@ function wait_for_sock_file { while [ ! -S $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "Socket $1 not found" fi done @@ -242,11 +265,12 @@ function _run_process { local cmd="$2" local stop_cmd="$3" local group=$4 - local user=${5:-$STACK_USER} + local user=$5 + local rundir=${6:-$OVS_RUNDIR} local systemd_service="devstack@$service.service" local unit_file="$SYSTEMD_DIR/$systemd_service" - local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" echo "Starting $service executed command": $cmd @@ -262,14 +286,14 @@ function _run_process { _start_process $systemd_service - local testcmd="test -e $OVS_RUNDIR/$service.pid" + local testcmd="test -e $rundir/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 local service_ctl_file - service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl) + service_ctl_file=$(ls $rundir | grep $service | grep ctl) if [ -z "$service_ctl_file" ]; then die $LINENO "ctl file for service $service is not present." 
fi - sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info } function clone_repository { @@ -285,22 +309,25 @@ function clone_repository { function create_public_bridge { # Create the public bridge that OVN will use sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 - sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS} _configure_public_network_connectivity } -function _disable_libvirt_apparmor { - if ! sudo aa-status --enabled ; then +function is_ovn_metadata_agent_enabled { + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then return 0 fi - # NOTE(arosen): This is used as a work around to allow newer versions - # of libvirt to work with ovs configured ports. See LP#1466631. - # requires the apparmor-utils - install_package apparmor-utils - # disables apparmor for libvirtd - sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd + return 1 } +function is_ovn_agent_enabled { + if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then + enable_service q-ovn-agent + return 0 + fi + return 1 + +} # OVN compilation functions # ------------------------- @@ -331,8 +358,24 @@ function compile_ovn { ./boot.sh fi + # NOTE(mnaser): OVN requires that you build using the OVS from the + # submodule. + # + # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping + # https://github.com/ovn-org/ovn/issues/128 + git submodule update --init + pushd ovs + if [ ! -f configure ] ; then + ./boot.sh + fi if [ ! 
-f config.status ] || [ configure -nt config.status ] ; then - ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir + ./configure + fi + make -j$(($(nproc) + 1)) + popd + + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure $prefix $localstatedir fi make -j$(($(nproc) + 1)) sudo make install @@ -345,7 +388,7 @@ function compile_ovn { # OVN service sanity check function ovn_sanity_check { - if is_service_enabled q-agt neutron-agt; then + if is_service_enabled q-agt neutron-agent; then die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." elif is_service_enabled q-l3 neutron-l3; then die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." @@ -363,15 +406,8 @@ function install_ovn { # Check the OVN configuration ovn_sanity_check - # Install tox, used to generate the config (see devstack/override-defaults) - pip_install tox - sudo mkdir -p $OVS_RUNDIR sudo chown $(whoami) $OVS_RUNDIR - # NOTE(lucasagomes): To keep things simpler, let's reuse the same - # RUNDIR for both OVS and OVN. 
This way we avoid having to specify the - # --db option in the ovn-{n,s}bctl commands while playing with DevStack - sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then # If OVS is already installed, remove it, because we're about to @@ -395,7 +431,6 @@ function install_ovn { sudo mkdir -p $OVS_PREFIX/var/log/ovn sudo chown $(whoami) $OVS_PREFIX/var/log/ovn else - fixup_ovn_centos install_package $(get_packages openvswitch) install_package $(get_packages ovn) fi @@ -460,7 +495,7 @@ function filter_network_api_extensions { function configure_ovn_plugin { echo "Configuring Neutron for OVN" - if is_service_enabled q-svc ; then + if is_service_enabled q-svc neutron-api; then filter_network_api_extensions populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" @@ -484,7 +519,9 @@ function configure_ovn_plugin { inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" fi - if is_service_enabled q-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False @@ -505,7 +542,9 @@ function configure_ovn_plugin { fi if is_service_enabled n-api-meta ; then - if is_service_enabled q-ovn-metadata-agent ; then + if is_ovn_metadata_agent_enabled; then + iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi @@ -538,29 +577,42 @@ function configure_ovn { fi # Metadata - if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then + local 
sample_file="" + local config_file="" + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n "$config_file" ]; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF - configure_root_helper_options $OVN_META_CONF + cp $sample_file $config_file + configure_root_helper_options $config_file - iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST - iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 - iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key fi + if [[ 
$config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi fi } @@ -572,7 +624,6 @@ function init_ovn { # in the ovn, ovn-nb, or ovs databases. We're going to trash them and # create new ones on each devstack run. - _disable_libvirt_apparmor local mkdir_cmd="mkdir -p ${OVN_DATADIR}" if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then @@ -586,6 +637,7 @@ function init_ovn { rm -f $OVS_DATADIR/.*.db.~lock~ sudo rm -f $OVN_DATADIR/*.db sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock } function _start_ovs { @@ -612,12 +664,12 @@ function _start_ovs { dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" # Note: ovn-controller will create and configure br-int once it is started. # So, no need to create it now because nothing depends on that bridge here. local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" - _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" else _start_process "$OVSDB_SERVER_SERVICE" _start_process "$OVS_VSWITCHD_SERVICE" @@ -637,14 +689,20 @@ function _start_ovs { sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" + sudo ovs-vsctl --no-wait set open_vswitch . 
external-ids:hostname=$(hostname) # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" fi - if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then + if [[ "$OVN_AGENT_EXTENSIONS" =~ 'ovn-bgp' ]]; then + if [[ -z "$NEUTRON_BGP_PEER_BRIDGES" ]]; then + echo "NEUTRON_BGP_PEER_BRIDGES must be set when ovn-bgp extension is enabled" + return 1 + fi + sudo ovs-vsctl --no-wait -- set Open_vSwitch . external-ids:neutron-bgp-peer-bridges=$NEUTRON_BGP_PEER_BRIDGES + elif is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} fi @@ -652,36 +710,34 @@ function _start_ovs { if is_service_enabled ovn-controller-vtep ; then ovn_base_setup_bridge br-v vtep-ctl add-ps br-v - vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" - _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" vtep-ctl set-manager tcp:$HOST_IP:6640 fi fi } -function _start_ovn_services { - _start_process "$OVSDB_SERVER_SERVICE" - _start_process "$OVS_VSWITCHD_SERVICE" +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db - if is_service_enabled ovn-northd ; then - _start_process "$OVN_NORTHD_SERVICE" - fi - if is_service_enabled ovn-controller ; then - _start_process "$OVN_CONTROLLER_SERVICE" - 
fi - if is_service_enabled ovn-controller-vtep ; then - _start_process "$OVN_CONTROLLER_VTEP_SERVICE" - fi - if is_service_enabled ovs-vtep ; then - _start_process "devstack@ovs-vtep.service" - fi - if is_service_enabled q-ovn-metadata-agent; then - _start_process "devstack@q-ovn-metadata-agent.service" + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL } # start_ovn() - Start running processes, including screen @@ -700,26 +756,13 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_NORTHD_SERVICE" fi - # Wait for the service to be ready - # Check for socket and db files for both OVN NB and SB - wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock - wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock - wait_for_db_file $OVN_DATADIR/ovnnb_db.db - wait_for_db_file $OVN_DATADIR/ovnsb_db.db + _wait_for_ovn_and_set_custom_config - if is_service_enabled tls-proxy; then - sudo ovn-nbctl 
--db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - fi - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL - sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then @@ -727,7 +770,7 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" - _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_SERVICE" fi @@ -736,19 +779,23 @@ function start_ovn { if is_service_enabled ovn-controller-vtep ; then if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" - _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_VTEP_SERVICE" fi fi - if is_service_enabled q-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF fi - 
_start_ovn_services + if is_ovn_agent_enabled; then + run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" + # Format logging + setup_logging $OVN_AGENT_CONF + fi } function _stop_ovs_dp { @@ -759,17 +806,32 @@ function _stop_ovs_dp { function _stop_process { local service=$1 echo "Stopping process $service" - if $SYSTEMCTL is-enabled $service; then + # Stop if running, regardless of enabled state + if $SYSTEMCTL is-active $service; then $SYSTEMCTL stop $service + fi + if $SYSTEMCTL is-enabled $service; then $SYSTEMCTL disable $service fi } function stop_ovn { - if is_service_enabled q-ovn-metadata-agent; then - sudo pkill -9 -f haproxy || : + # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled", + # instead it relies only in the configured services, disregarding the + # flag "TARGET_ENABLE_OVN_AGENT". It is needed to force the OVN Metadata + # agent stop in case the flag "TARGET_ENABLE_OVN_AGENT" is set. + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi + if is_ovn_agent_enabled; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : + _stop_process "devstack@q-ovn-agent.service" + fi if is_service_enabled ovn-controller-vtep ; then _stop_process "$OVN_CONTROLLER_VTEP_SERVICE" fi @@ -783,10 +845,22 @@ function stop_ovn { _stop_process "devstack@ovs-vtep.service" fi + # Clear OVS external-ids before stopping to prevent stale config on restack + if sudo ovs-vsctl show &>/dev/null; then + sudo ovs-vsctl --if-exists clear open_vswitch . 
external-ids + fi + _stop_process "$OVS_VSWITCHD_SERVICE" _stop_process "$OVSDB_SERVER_SERVICE" _stop_ovs_dp + + # Clean up runtime files that can prevent restart + sudo rm -f $OVS_RUNDIR/*.sock $OVS_RUNDIR/*.pid $OVS_RUNDIR/*.ctl + sudo rm -f $OVN_RUNDIR/*.sock $OVN_RUNDIR/*.pid $OVN_RUNDIR/*.ctl + # Clean up database lock files + sudo rm -f $OVS_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/.*.db.~lock~ } function _cleanup { @@ -812,5 +886,5 @@ function cleanup_ovn { _cleanup $ovs_path fi - sudo rm -f $OVN_RUNDIR + sudo rm -rf $OVN_RUNDIR } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index cc41a8cd46..adabc56412 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -80,19 +80,6 @@ function _neutron_ovs_base_install_agent_packages { elif is_fedora; then restart_service openvswitch sudo systemctl enable openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then - restart_service openvswitch-switch - else - # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 - if [[ $DISTRO =~ "tumbleweed" ]]; then - sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch - fi - restart_service openvswitch || { - journalctl -xe || : - systemctl status openvswitch - } - fi fi fi } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 9ae5555afb..6b6f531a01 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -20,7 +20,7 @@ Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} -OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5} +OVS_BRANCH=${OVS_BRANCH:-branch-3.3} # Functions @@ -33,9 +33,9 @@ function load_module { local fatal=$2 if [ "$(trueorfalse True fatal)" == "True" ]; then - sudo modprobe $module || 
(dmesg && die $LINENO "FAILED TO LOAD $module") + sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module") else - sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) + sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg) fi } @@ -87,9 +87,15 @@ function prepare_for_ovs_compilation { install_package kernel-devel-$KERNEL_VERSION install_package kernel-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package openssl-devel + fi elif is_ubuntu ; then install_package linux-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package libssl-dev + fi fi } @@ -97,7 +103,7 @@ function prepare_for_ovs_compilation { function load_ovs_kernel_modules { load_module openvswitch load_module vport-geneve False - dmesg | tail + sudo dmesg | tail } # reload_ovs_kernel_modules() - reload openvswitch kernel module @@ -158,10 +164,8 @@ function compile_ovs { sudo make install if [[ "$build_modules" == "True" ]]; then sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install - reload_ovs_kernel_modules - else - load_ovs_kernel_modules fi + reload_ovs_kernel_modules cd $_pwd } @@ -176,12 +180,6 @@ function action_openvswitch { ${action}_service openvswitch-switch elif is_fedora; then ${action}_service openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then - ${action}_service openvswitch-switch - else - ${action}_service openvswitch - fi fi } diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index fbd4692bba..d0a5d6b8c2 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -35,7 +35,7 @@ Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int} # The next variable is configured by plugin # e.g. 
_configure_neutron_l3_agent or lib/neutron_plugins/* # -# L3 routers exist per tenant +# L3 routers exist per project Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} @@ -47,7 +47,8 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # used for the network. In case of ofagent, you should add the # corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. # For openvswitch agent, you should add the corresponding entry to -# your OVS_BRIDGE_MAPPINGS. +# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry +# to your OVN_BRIDGE_MAPPINGS. # # eg. (ofagent) # Q_USE_PROVIDERNET_FOR_PUBLIC=True @@ -60,6 +61,11 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex # +# eg. (ovn agent) +# Q_USE_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVN_BRIDGE_MAPPINGS=public:br-ex +# # The provider-network-type defaults to flat, however, the values # PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could # be set to specify the parameters for an alternate network type. 
@@ -88,6 +94,7 @@ NETWORK_GATEWAY=${NETWORK_GATEWAY:-} PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-} PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} +PUBLIC_SUBNET_ENABLE_DHCP=${PUBLIC_SUBNET_ENABLE_DHCP:-False} # Subnetpool defaults USE_SUBNETPOOL=${USE_SUBNETPOOL:-True} @@ -166,14 +173,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network 
$NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi @@ -183,7 +190,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi @@ -193,7 +200,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -210,12 +217,12 @@ function create_neutron_initial_network { if is_networking_extension_supported "router" && is_networking_extension_supported "external-net"; then # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then - # create a tenant-owned router. 
- ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + # create a project-owned router. + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi @@ -225,9 +232,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) else - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create 
"$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -257,7 +264,7 @@ function _neutron_create_private_subnet_v4 { subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } @@ -278,19 +285,23 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { + local dhcp_param="--no-dhcp" + if [[ "$PUBLIC_SUBNET_ENABLE_DHCP" == "True" ]]; then + dhcp_param="--dhcp" + fi local subnet_params="--ip-version 4 " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " fi - subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " + subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE $dhcp_param " subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip id_and_ext_gw_ip=$(openstack 
--os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') @@ -300,9 +311,13 @@ function _neutron_create_public_subnet_v4 { # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { + local dhcp_param="--no-dhcp" + if [[ "$PUBLIC_SUBNET_ENABLE_DHCP" == "True" ]]; then + dhcp_param="--dhcp" + fi local subnet_params="--ip-version 6 " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " - subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " + subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE $dhcp_param " subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') @@ -323,21 +338,11 @@ function _neutron_configure_router_v4 { openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) - elif [[ "$Q_AGENT" = "linuxbridge" ]]; then - # Get the device the neutron router and network for $FIXED_RANGE - # will be using. - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - # in provider nets a bridge mapping uses the public bridge directly - ext_gw_interface=$PUBLIC_BRIDGE - else - # e.x. 
brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 - ext_gw_interface=brq${EXT_NET_ID:0:11} - fi fi if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} @@ -372,7 +377,7 @@ function _neutron_configure_router_v6 { fi # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. @@ -391,7 +396,7 @@ function _neutron_configure_router_v6 { # Override global IPV6_ROUTER_GW_IP with the true value from neutron # NOTE(slaweq): when enforce scopes is enabled in Neutron, router's # gateway ports aren't visible in API because such ports don't belongs - # to any tenant. Because of that, at least temporary we need to find + # to any project. Because of that, at least temporary we need to find # IPv6 address of the router's gateway in a bit different way. 
# It can be reverted when bug # https://bugs.launchpad.net/neutron/+bug/1959332 will be fixed diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 5b32468d21..757a562ee6 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -12,7 +12,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common { - _neutron_service_plugin_class_add $METERING_PLUGIN + neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent { diff --git a/lib/neutron_plugins/services/ovn-bgp b/lib/neutron_plugins/services/ovn-bgp new file mode 100644 index 0000000000..dc6a2c58fc --- /dev/null +++ b/lib/neutron_plugins/services/ovn-bgp @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_ovn_bgp_service_plugin { + neutron_service_plugin_class_add "ovn-bgp" +} diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos index af9eb3d5b4..c11c315586 100644 --- a/lib/neutron_plugins/services/qos +++ b/lib/neutron_plugins/services/qos @@ -6,7 +6,7 @@ function configure_qos_service_plugin { function configure_qos_core_plugin { - configure_qos_$NEUTRON_CORE_PLUGIN + configure_qos_$Q_PLUGIN } diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments new file mode 100644 index 0000000000..08936bae49 --- /dev/null +++ b/lib/neutron_plugins/services/segments @@ -0,0 +1,10 @@ +#!/bin/bash + +function configure_segments_service_plugin { + neutron_service_plugin_class_add segments +} + +function configure_segments_extension { + configure_segments_service_plugin +} + diff --git a/lib/nova b/lib/nova index 4c14374d0f..dcb4f46935 100644 --- a/lib/nova +++ b/lib/nova @@ -53,11 +53,19 @@ NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf 
NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_API_DB=${NOVA_API_DB:-nova_api} -NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi -NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. +NOVA_CPU_UUID="" + # The total number of cells we expect. Must be greater than one and doesn't # count cell0. NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} @@ -67,13 +75,10 @@ NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -# Toggle for deploying Nova-API under a wsgi server. We default to -# true to use UWSGI, but allow False so that fall back to the -# eventlet server can happen for grenade runs. -# NOTE(cdent): We can adjust to remove the eventlet-base api service -# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to -# mean "use uwsgi" because we'll be always using uwsgi. -NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. 
+NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" @@ -97,33 +102,34 @@ NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVI METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) + +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" +else + NOVA_MY_IP="$HOST_IP" +fi + # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. -NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf -# Set default defaults here as some hypervisor drivers override these -PUBLIC_INTERFACE_DEFAULT=br100 -# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that -# the default isn't completely crazy. This will match ``eth*``, ``em*``, or -# the new ``p*`` interfaces, then basically picks the first -# alphabetically. It's probably wrong, however it's less wrong than -# always using ``eth0`` which doesn't exist on new Linux distros at all. 
-GUEST_INTERFACE_DEFAULT=$(ip link \ - | grep 'state UP' \ - | awk '{print $2}' \ - | sed 's/://' \ - | grep ^[ep] \ - | head -1) - # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. +NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) # Get hypervisor configuration # ---------------------------- @@ -162,6 +168,9 @@ NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} # Whether to use Keystone unified limits instead of legacy quota limits. NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} + # Functions # --------- @@ -219,6 +228,9 @@ function cleanup_nova { done sudo iscsiadm --mode node --op delete || true + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + # Clean out the instances directory. sudo rm -rf $NOVA_INSTANCES_PATH/* fi @@ -234,8 +246,8 @@ function cleanup_nova { stop_process "n-api" stop_process "n-api-meta" - remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" - remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" if [[ "$NOVA_BACKEND" == "LVM" ]]; then clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME @@ -306,6 +318,7 @@ function configure_nova { fi fi + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM # Ensure each compute host uses a unique iSCSI initiator echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi @@ -324,14 +337,30 @@ EOF # set chap algorithms. The default chap_algorithm is md5 which will # not work under FIPS. 
- # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in - # and determine the correct solution for openeuler here - if ! is_openeuler; then - iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" - fi + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" - # ensure that iscsid is started, even when disabled by default - restart_service iscsid + if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then + # ensure that iscsid is started, even when disabled by default + restart_service iscsid + + # For NVMe-oF we need different packages that may not be present + else + install_package nvme-cli + sudo modprobe nvme-fabrics + + # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + sudo modprobe nvme-rdma + iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`} + if !
sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi + fi fi # Rebuild the config file from scratch @@ -359,11 +388,7 @@ function create_nova_accounts { create_service_user "nova" "admin" local nova_api_url - if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT" - else - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" - fi + nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)" get_or_create_endpoint \ @@ -422,20 +447,23 @@ function create_nova_conf { iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS" iniset $NOVA_CONF scheduler workers "$API_WORKERS" iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlight bugs in our response schemas. 
+ iniset $NOVA_CONF api response_validation error + iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager - if is_fedora || is_suse; then - # nova defaults to /usr/local/bin, but fedora and suse pip like to + iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL + iniset $NOVA_CONF DEFAULT service_down_time $(($NOVA_SERVICE_REPORT_INTERVAL * 6)) + + if is_fedora; then + # nova defaults to /usr/local/bin, but fedora pip like to # install things in /usr/bin iniset $NOVA_CONF DEFAULT bindir "/usr/bin" fi @@ -443,7 +471,7 @@ function create_nova_conf { # only setup database connections and cache backend if there are services # that require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. - if is_service_enabled n-api n-cond n-sched; then + if is_service_enabled n-api n-cond n-sched n-spice n-novnc n-sproxy; then # If we're in multi-tier cells mode, we want our control services pointing # at cell0 instead of cell1 to ensure isolation. If not, we point everything # at the main database like normal. 
@@ -473,10 +501,12 @@ function create_nova_conf { NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" - if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" - iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT + if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $NOVA_CONF oslo_policy enforce_new_defaults True + iniset $NOVA_CONF oslo_policy enforce_scope True + else + iniset $NOVA_CONF oslo_policy enforce_new_defaults False + iniset $NOVA_CONF oslo_policy enforce_scope False fi configure_keystone_authtoken_middleware $NOVA_CONF nova @@ -486,6 +516,10 @@ function create_nova_conf { configure_cinder_access fi + if is_service_enabled manila; then + configure_manila_access + fi + if [ -n "$NOVA_STATE_PATH" ]; then iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH" @@ -502,7 +536,7 @@ function create_nova_conf { # nova defaults to genisoimage but only mkisofs is available for 15.0+ # rhel provides mkisofs symlink to genisoimage or xorriso appropiately - if is_suse || is_fedora; then + if is_fedora; then iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs fi @@ -512,11 +546,11 @@ function create_nova_conf { iniset $NOVA_CONF upgrade_levels compute "auto" if is_service_enabled n-api; then - write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api" fi if is_service_enabled n-api-meta; then - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" 
"$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata" fi if is_service_enabled ceilometer; then @@ -598,32 +632,19 @@ function create_nova_conf { function configure_placement_nova_compute { # Use the provided config file path or default to $NOVA_CONF. local conf=${1:-$NOVA_CONF} - iniset $conf placement auth_type "password" - iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf placement username placement - iniset $conf placement password "$SERVICE_PASSWORD" - iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf placement project_name "$SERVICE_TENANT_NAME" - iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf placement region_name "$REGION_NAME" + configure_keystoneauth $conf nova placement } # Configure access to cinder. function configure_cinder_access { iniset $NOVA_CONF cinder os_region_name "$REGION_NAME" - iniset $NOVA_CONF cinder auth_type "password" - iniset $NOVA_CONF cinder auth_url "$KEYSTONE_SERVICE_URI" # NOTE(mriedem): This looks a bit weird but we use the nova user here # since it has the admin role and the cinder user does not. This is # similar to using the nova user in init_nova_service_user_conf. We need # to use a user with the admin role for background tasks in nova to # be able to GET block-storage API resources owned by another project # since cinder has low-level "is_admin" checks in its DB API. 
- iniset $NOVA_CONF cinder username nova - iniset $NOVA_CONF cinder password "$SERVICE_PASSWORD" - iniset $NOVA_CONF cinder user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF cinder project_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF cinder project_domain_name "$SERVICE_DOMAIN_NAME" + configure_keystoneauth $NOVA_CONF nova cinder if is_service_enabled tls-proxy; then CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} @@ -631,6 +652,11 @@ function configure_cinder_access { fi } +# Configure access to manila. +function configure_manila_access { + configure_keystoneauth $NOVA_CONF nova manila +} + function configure_console_compute { # If we are running multiple cells (and thus multiple console proxies) on a # single host, we offset the ports to avoid collisions. We need to @@ -677,7 +703,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF vnc enabled false fi - if is_service_enabled n-spice; then + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. 
SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} @@ -687,7 +713,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" fi - if is_service_enabled n-sproxy; then + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then iniset $NOVA_CPU_CONF serial_console enabled True iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" fi @@ -783,25 +809,16 @@ function configure_nova_unified_limits { iniset $NOVA_CONF oslo_limit username nova iniset $NOVA_CONF oslo_limit auth_type password iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI - iniset $NOVA_CONF oslo_limit system_scope all - iniset $NOVA_CONF oslo_limit endpoint_id \ - $(openstack endpoint list --service nova -f value -c ID) - - # Allow the nova service user to read quotas - openstack --os-cloud devstack-system-admin role add --user nova \ - --user-domain $SERVICE_DOMAIN_NAME --system all reader + iniset $NOVA_CONF oslo_limit project_name service + iniset $NOVA_CONF oslo_limit project_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit endpoint_interface public + iniset $NOVA_CONF oslo_limit endpoint_service_type compute + iniset $NOVA_CONF oslo_limit endpoint_region_name $REGION_NAME } function init_nova_service_user_conf { iniset $NOVA_CONF service_user send_service_user_token True - iniset $NOVA_CONF service_user auth_type password - iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_URI" - iniset $NOVA_CONF service_user username nova - iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD" - iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" - iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF service_user auth_strategy keystone + configure_keystoneauth $NOVA_CONF nova 
service_user } function conductor_conf { @@ -903,8 +920,23 @@ function install_nova { # a websockets/html5 or flash powered VNC console for vm instances NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE) if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then + # Installing novnc on Debian bullseye breaks the global pip + # install. This happens because novnc pulls in distro cryptography + # which will be preferred by distro pip, but if anything has + # installed pyOpenSSL from pypi (keystone) that is not compatible + # with distro cryptography. Fix this by installing + # python3-openssl (pyOpenSSL) from the distro which pip will prefer + # on Debian. Ubuntu has inverse problems so we only do this for + # Debian. + local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + NOVNC_WEB_DIR=/usr/share/novnc - install_package novnc + install_package $novnc_packages else NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH @@ -930,33 +962,17 @@ function install_nova { # start_nova_api() - Start the API process ahead of other things function start_nova_api { - # Get right service port for testing - local service_port=$NOVA_SERVICE_PORT - local service_protocol=$NOVA_SERVICE_PROTOCOL - local nova_url - if is_service_enabled tls-proxy; then - service_port=$NOVA_SERVICE_PORT_INT - service_protocol="http" - fi - # Hack to set the path for rootwrap local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api "$NOVA_BIN_DIR/nova-api" - nova_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tsl enabled - if is_service_enabled tls-proxy; then - start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT - fi - else - run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" - 
nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ - fi + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" echo "Waiting for nova-api to start..." - if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then + # Check that the nova API service is running. + local service_url=$NOVA_SERVICE_PROTOCOL://$SERVICE_HOST/compute/v2.1/ + + if ! wait_for_service $SERVICE_TIMEOUT $service_url; then die $LINENO "nova-api did not start" fi @@ -1000,6 +1016,10 @@ function start_nova_compute { # by the compute process. configure_console_compute + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180 + # Configure the OVSDB connection for os-vif if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" @@ -1010,7 +1030,14 @@ function start_nova_compute { iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True fi + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. @@ -1027,6 +1054,15 @@ function start_nova_compute { # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else @@ -1048,11 +1084,7 @@ function start_nova_rest { local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - else - run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" - fi + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" export PATH=$old_path } diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 3e7d2801d6..e421c5bac0 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -53,9 +53,33 @@ EOF sudo systemctl daemon-reload } +function enable_debian_12_backports { + # we are using debian backport repos to work around a qemu-img bug + # related to luks encrypted volumes, this requires us to use newer + # versions of qemu libvirt and the supporting packages for vm firmware + # This is related to https://bugs.launchpad.net/ceph/+bug/2116852 + # and https://gitlab.com/qemu-project/qemu/-/commit/145f12e + if ! 
grep -qr "bookworm-backports" /etc/apt/sources.list /etc/apt/sources.list.d/ 2>/dev/null; then + echo "deb http://deb.debian.org/debian bookworm-backports main" | \ + sudo tee /etc/apt/sources.list.d/bookworm-backports.list + fi + + sudo tee /etc/apt/preferences.d/99-nova-backports < - elif is_fedora || is_suse; then + + if [[ ${DISTRO} =~ "bookworm" ]] || [[ ${DISTRO} =~ "trixie" ]]; then + # Since debian 13 spice is supported in qemu through an additional + # package. We also enabled backports on debian 12 which makes the + # same change. + install_package qemu-system-modules-spice + fi + + elif is_fedora; then # Optionally enable the virt-preview repo when on Fedora if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then @@ -82,16 +115,31 @@ function install_libvirt { sudo dnf copr enable -y @virtmaint-sig/virt-preview fi + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm + fi + # Note that in CentOS/RHEL this needs to come from the RDO # repositories (qemu-kvm-ev ... which provides this package) # as the base system version is too old. 
We should have # pre-installed these - install_package qemu-kvm - install_package libvirt libvirt-devel python3-libvirt + install_package $qemu_package + install_package libvirt libvirt-devel + install_package edk2-ovmf + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt + fi if is_arch "aarch64"; then install_package edk2-aarch64 fi + + install_package swtpm swtpm-tools mdevctl fi if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then @@ -115,8 +163,8 @@ cgroup_device_acl = [ EOF fi - if is_fedora || is_suse; then - # Starting with fedora 18 and opensuse-12.3 enable stack-user to + if is_fedora; then + # Starting with fedora 18 enable stack-user to # virsh -c qemu:///system by creating a policy-kit rule for # stack-user using the new Javascript syntax rules_dir=/etc/polkit-1/rules.d diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 87ee49fa4b..39cb45ca67 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -36,7 +36,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver" + iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriverWithoutFakeNodes" # Disable arbitrary limits iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver } diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index f058e9bb53..9a39c798a8 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -53,6 +53,10 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic project_name demo fi + if is_ironic_sharded; then + iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME + fi + iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic region_name $REGION_NAME diff --git 
a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index c1cd132548..163688f7f2 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -41,7 +41,7 @@ function configure_nova_hypervisor { iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE" if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then - iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL" + iniset $NOVA_CONF libvirt cpu_models "$LIBVIRT_CPU_MODEL" fi # Do not enable USB tablet input devices to avoid QEMU CPU overhead. iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" @@ -56,6 +56,10 @@ function configure_nova_hypervisor { # arm64-specific configuration if is_arch "aarch64"; then iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" + # NOTE(mnaser): We cannot have `cpu_models` set if the `cpu_mode` is + # set to `host-passthrough`, or `nova-compute` refuses to + # start. + inidelete $NOVA_CONF libvirt cpu_models fi if isset ENABLE_FILE_INJECTION; then @@ -114,9 +118,6 @@ function install_nova_hypervisor { sudo dpkg-statoverride --add --update $STAT_OVERRIDE fi done - elif is_suse; then - # Workaround for missing dependencies in python-libguestfs - install_package python-libguestfs guestfs-data augeas augeas-lenses elif is_fedora; then install_package python3-libguestfs fi diff --git a/lib/os-vif b/lib/os-vif index 865645c0d5..7c8bee3744 100644 --- a/lib/os-vif +++ b/lib/os-vif @@ -1,10 +1,5 @@ #!/bin/bash -# support vsctl or native. -# until bug #1929446 is resolved we override the os-vif default -# and fall back to the legacy "vsctl" driver. 
-OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"} - function is_ml2_ovs { if [[ "${Q_AGENT}" == "openvswitch" ]]; then echo "True" @@ -19,11 +14,9 @@ OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) function configure_os_vif { if [[ -e ${NOVA_CONF} ]]; then - iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} fi if [[ -e ${NEUTRON_CONF} ]]; then - iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} fi } diff --git a/lib/oslo b/lib/oslo deleted file mode 100644 index 3ae64c8210..0000000000 --- a/lib/oslo +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# lib/oslo -# -# Functions to install **Oslo** libraries from git -# -# We need this to handle the fact that projects would like to use -# pre-released versions of oslo libraries. -# -# Included for compatibility with grenade, remove in Queens -source $TOP_DIR/lib/libraries diff --git a/lib/placement b/lib/placement index b7798669a1..03aaa0344b 100644 --- a/lib/placement +++ b/lib/placement @@ -37,7 +37,7 @@ if [[ ${USE_VENV} = True ]]; then else PLACEMENT_BIN_DIR=$(get_python_exec_prefix) fi -PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api +PLACEMENT_UWSGI=placement.wsgi.api:application PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini if is_service_enabled tls-proxy; then @@ -48,6 +48,12 @@ fi PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By Default, these flags are False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + # Functions # --------- @@ -62,33 +68,7 @@ function is_placement_enabled { # runs that a clean run would need to clean up function cleanup_placement { sudo rm -f $(apache_site_config_for placement-api) - remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" -} - -# _config_placement_apache_wsgi() - Set WSGI config files -function _config_placement_apache_wsgi { - local placement_api_apache_conf - local venv_path="" - local placement_bin_dir="" - placement_bin_dir=$(get_python_exec_prefix) - placement_api_apache_conf=$(apache_site_config_for placement-api) - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages" - placement_bin_dir=${PROJECT_VENV["placement"]}/bin - fi - - sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g; - s|%SSLENGINE%|$placement_ssl|g; - s|%SSLCERTFILE%|$placement_certfile|g; - s|%SSLKEYFILE%|$placement_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $placement_api_apache_conf + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" } # create_placement_conf() - Write config @@ -106,10 +86,13 @@ function configure_placement { sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR create_placement_conf - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy 
enforce_scope True else - _config_placement_apache_wsgi + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False fi } @@ -134,7 +117,6 @@ function init_placement { # install_placement() - Collect source and prepare function install_placement { - install_apache_wsgi # Install the openstackclient placement client plugin for CLI pip_install_gr osc-placement git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH @@ -143,12 +125,7 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" - else - enable_apache_site placement-api - restart_apache_server - fi + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" echo "Waiting for placement-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then @@ -162,12 +139,7 @@ function start_placement { # stop_placement() - Disable the api service and stop it. 
function stop_placement { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - stop_process "placement-api" - else - disable_apache_site placement-api - restart_apache_server - fi + stop_process "placement-api" } # Restore xtrace diff --git a/lib/rpc_backend b/lib/rpc_backend index 743b4ae170..bbb41499be 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -52,20 +52,7 @@ function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server install_package rabbitmq-server - if is_suse; then - install_package rabbitmq-server-plugins - # the default systemd socket activation only listens on the loopback interface - # which causes rabbitmq to try to start its own epmd - sudo mkdir -p /etc/systemd/system/epmd.socket.d - cat </dev/null -[Socket] -ListenStream= -ListenStream=[::]:4369 -EOF - sudo systemctl daemon-reload - sudo systemctl restart epmd.socket epmd.service - fi - if is_fedora || is_suse; then + if is_fedora; then # NOTE(jangutter): If rabbitmq is not running (as in a fresh # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with # socket activation. This fails the first time and does not get diff --git a/lib/swift b/lib/swift index ba92f3dcc3..2710346451 100644 --- a/lib/swift +++ b/lib/swift @@ -318,8 +318,8 @@ function generate_swift_config_services { iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" # Using a sed and not iniset/iniuncomment because we want to a global # modification and make sure it works for new sections. 
@@ -402,6 +402,11 @@ function configure_swift { # Versioned Writes iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068 + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512" + fi + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" @@ -429,6 +434,13 @@ function configure_swift { swift_pipeline+=" s3token" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token secret_cache_duration 900 + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_url ${KEYSTONE_SERVICE_URI} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token project_name 'service' + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token project_domain_name ${SERVICE_DOMAIN_NAME} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token username 'swift' + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token user_domain_name ${SERVICE_DOMAIN_NAME} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token password ${SERVICE_PASSWORD} fi swift_pipeline+=" keystoneauth" fi @@ -472,6 +484,9 @@ function configure_swift { iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE} iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_file_size ${SWIFT_MAX_FILE_SIZE} + # Create an additional storage policy + iniset ${SWIFT_CONF_DIR}/swift.conf storage-policy:1 name silver + local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf @@ -542,9 +557,6 @@ function configure_swift { local swift_log_dir=${SWIFT_DATA_DIR}/logs sudo rm -rf ${swift_log_dir} local swift_log_group=adm 
- if is_suse; then - swift_log_group=root - fi sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then @@ -711,6 +723,10 @@ function init_swift { $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42 $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42 $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42 + + # An additional storage policy requires an object ring as well. + # Re-using the previously created one to use the same devices. + cp object.ring.gz object-1.ring.gz } && popd >/dev/null } @@ -836,20 +852,22 @@ function stop_swift { for type in proxy object container account; do stop_process s-${type} done + # Stop the container-sync daemon if it was started + stop_process s-container-sync # Blast out any stragglers pkill -f swift- || true } function swift_configure_tempurls { # note we are using swift credentials! - openstack --os-cloud "" \ - --os-region-name $REGION_NAME \ - --os-auth-url $KEYSTONE_SERVICE_URI \ - --os-username=swift \ - --os-password=$SERVICE_PASSWORD \ - --os-user-domain-name=$SERVICE_DOMAIN_NAME \ - --os-project-name=$SERVICE_PROJECT_NAME \ - --os-project-domain-name=$SERVICE_DOMAIN_NAME \ + openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } diff --git a/lib/tempest b/lib/tempest index 1fd4184763..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -18,7 +18,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # @@ -29,6 +29,7 @@ # - 
``DEFAULT_INSTANCE_USER`` # - ``DEFAULT_INSTANCE_ALT_USER`` # - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` # - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: @@ -71,6 +72,17 @@ TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit). +TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} + # Neutron/Network variables IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) @@ -90,6 +102,11 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} +TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} +TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} + +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) + # Functions # --------- @@ -117,6 +134,13 @@ function set_tempest_venv_constraints { (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt 2>/dev/null || git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the below env var pointing to master + # constraints even that is what default in tox.ini. 
Otherwise it can + # create the issue for grenade run where old and new devstack can have + # different tempest (old and master) to install. For details on the problem, + # refer to the https://bugs.launchpad.net/devstack/+bug/2003993 + export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c @@ -127,6 +151,53 @@ function set_tempest_venv_constraints { fi } +# Makes a call to glance to get a list of active images, ignoring +# ramdisk and kernel images. Takes 2 arguments, an array and a +# variable. The array will contain the list of active image UUIDs; +# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be +# set as the value of the img_id ($2) parameter. +function get_active_images { + declare -n img_array=$1 + declare -n img_id=$2 + + # start with a fresh array in case we are called multiple times + img_array=() + + # NOTE(gmaan): Most of the iso image require ssh to be enabled explicitly + # and if we set those iso images in image_ref and image_ref_alt that can + # cause test to fail because many tests using image_ref and image_ref_alt + # to boot server also perform ssh. We skip to set iso image in tempest + # unless it is requested via TEMPEST_USE_ISO_IMAGE.
+ while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do + if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then + continue + fi + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + img_id="$IMAGE_UUID" + fi + img_array+=($IMAGE_UUID) + done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2,$4 }') +} + +function poll_glance_images { + declare -n image_array=$1 + declare -n image_id=$2 + local -i poll_count + + poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT + while (( poll_count-- > 0 )) ; do + sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL + get_active_images image_array image_id + if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then + return + fi + done + local msg + msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; " + msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec" + warn $LINENO "$msg" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -136,6 +207,8 @@ function configure_tempest { pip_install_gr testrepository fi + local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES} + local image_lines local images local num_images @@ -168,13 +241,21 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - while read -r IMAGE_NAME IMAGE_UUID; do - if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then - image_uuid="$IMAGE_UUID" - image_uuid_alt="$IMAGE_UUID" + get_active_images images image_uuid + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet. 
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi fi - images+=($IMAGE_UUID) - done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + fi case "${#images[*]}" in 0) @@ -184,13 +265,22 @@ function configure_tempest { 1) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[0]} fi + image_uuid_alt=$image_uuid ;; *) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[1]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in ${images[@]}; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + break + fi + done fi ;; esac @@ -220,13 +310,15 @@ function configure_tempest { if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + ram=${TEMPEST_FLAVOR_RAM} + openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. 
disk=$(image_size_in_gib $image_uuid_alt) - openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else @@ -277,6 +369,20 @@ function configure_tempest { fi fi + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ -n "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True + fi + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT + fi + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} @@ -306,7 +412,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION @@ -317,19 +422,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi - if [ "$ENABLE_IDENTITY_V2" == "True" ]; then - # Run Identity API v2 tests ONLY if needed - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True - else - # Skip Identity API v2 tests by default - iniset $TEMPEST_CONFIG 
identity-feature-enabled api_v2 False - fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} - if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then - # we're going to disable v2 admin unless we're using v2 by default. - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False - fi - if is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi @@ -431,9 +524,19 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi - if is_service_enabled n-novnc; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True + fi + + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
+ local nova_policy_roles="admin,manager,member,reader,service" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles # Network iniset $TEMPEST_CONFIG network project_networks_reachable false @@ -449,8 +552,19 @@ function configure_tempest { # Scenario SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + # since version 0.6.0 cirros uses dhcpcd dhcp client by default, however, cirros, prior to the + # version 0.6.0, used udhcpc (the only available client at that time) which is also tempest's default + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use dhcpcd client when version greater or equal 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd + fi + fi + # If using provider networking, use the physical network for validation rather than private TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME if is_provider_network; then @@ -486,6 +600,10 @@ function configure_tempest { TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} if [ "$tempest_volume_min_microversion" == "None" ]; then @@ -536,6 +654,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi + if [[ 
$ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi + # Placement Features # Set the microversion range for placement. # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. @@ -609,8 +731,14 @@ function configure_tempest { # test can be run with scoped token. if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope keystone true - iniset $TEMPEST_CONFIG auth admin_system 'all' - iniset $TEMPEST_CONFIG auth admin_project_name '' + fi + + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true fi if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then @@ -634,13 +762,13 @@ function configure_tempest { local tmp_cfg_file tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR - if [[ "$OFFLINE" != "True" ]]; then - tox -revenv-tempest --notest - fi local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) set_tempest_venv_constraints $tmp_u_c_m + if [[ "$OFFLINE" != "True" ]]; then + tox -revenv-tempest --notest + fi tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt rm -f $tmp_u_c_m @@ -668,12 +796,12 @@ function configure_tempest { # Neutron API Extensions # disable metering if we didn't enable the service - if ! is_service_enabled q-metering; then + if ! is_service_enabled q-metering neutron-metering; then DISABLE_NETWORK_API_EXTENSIONS+=", metering" fi # disable l3_agent_scheduler if we didn't enable L3 agent - if ! is_service_enabled q-l3; then + if ! 
is_service_enabled q-l3 neutron-l3; then DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi @@ -714,7 +842,12 @@ function configure_tempest { # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - pip_install 'tox!=2.8.0' + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # branches jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + pip_install 'tox!=2.8.0,<4.0.0' pushd $TEMPEST_DIR # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH # is tag name not master. git_clone would not checkout tag because diff --git a/lib/tls b/lib/tls index 5a7f5ae324..fa0a448d7d 100644 --- a/lib/tls +++ b/lib/tls @@ -212,9 +212,6 @@ function init_CA { if is_fedora; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem sudo update-ca-trust - elif is_suse; then - sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/trust/anchors/devstack-chain.pem - sudo update-ca-certificates elif is_ubuntu; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt @@ -367,8 +364,11 @@ function deploy_int_CA { function fix_system_ca_bundle_path { if is_service_enabled tls-proxy; then local capath - capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') - + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! 
-L $capath ]]; then if is_fedora; then sudo rm -f $capath @@ -376,9 +376,6 @@ function fix_system_ca_bundle_path { elif is_ubuntu; then sudo rm -f $capath sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath - elif is_suse; then - sudo rm -f $capath - sudo ln -s /etc/ssl/ca-bundle.pem $capath else echo "Don't know how to set the CA bundle, expect the install to fail." fi @@ -441,9 +438,6 @@ function enable_mod_ssl { if is_ubuntu; then sudo a2enmod ssl - elif is_suse; then - sudo a2enmod ssl - sudo a2enflag SSL elif is_fedora; then # Fedora enables mod_ssl by default : @@ -458,6 +452,7 @@ function enable_mod_ssl { # =============== function tune_apache_connections { + local should_restart=$1 local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf if ! [ -f $tuning_file ] ; then sudo bash -c "cat > $tuning_file" << EOF @@ -500,7 +495,12 @@ ThreadLimit 64 MaxRequestsPerChild 0 EOF - restart_apache_server + if [ "$should_restart" != "norestart" ] ; then + # Only restart the apache server if we know we really want to + # do so. Too many restarts in a short period of time is treated + # as an error by systemd. + restart_apache_server + fi fi } @@ -515,7 +515,8 @@ function start_tls_proxy { # 8190 is the default apache size. local f_header_size=${6:-8190} - tune_apache_connections + # We don't restart apache here as we'll do it at the end of the function. + tune_apache_connections norestart local config_file config_file=$(apache_site_config_for $b_service) @@ -536,6 +537,7 @@ $listen_string SSLEngine On SSLCertificateFile $DEVSTACK_CERT + SSLProtocol -all +TLSv1.3 +TLSv1.2 # Disable KeepAlive to fix bug #1630664 a.k.a the # ('Connection aborted.', BadStatusLine("''",)) error @@ -549,22 +551,23 @@ $listen_string # Avoid races (at the cost of performance) to re-use a pooled connection # where the connection is closed (bug 1807518). + # Set acquire=1 to disable waiting for connection pool members so that + # we can determine when apache is overloaded (returns 503). 
SetEnv proxy-initial-not-pooled - ProxyPass http://$b_host:$b_port/ retry=0 nocanon + ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1 ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info - CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined EOF - if is_suse ; then - sudo a2enflag SSL - fi for mod in headers ssl proxy proxy_http; do - enable_apache_mod $mod + # We don't need to restart here as we will restart once at the end + # of the function. + enable_apache_mod $mod norestart done enable_apache_site $b_service restart_apache_server diff --git a/openrc b/openrc index 6d488bb0ba..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -7,9 +7,6 @@ # Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# NOTE: support for the old NOVA_* novaclient environment variables has -# been removed. - if [[ -n "$1" ]]; then OS_USERNAME=$1 fi @@ -35,26 +32,11 @@ fi # Get some necessary configuration source $RC_DIR/lib/tls -# The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. In some places **tenant** remains -# referenced, but in all cases this just means **project**. We will -# warn if we need to turn on legacy **tenant** support to have a -# working environment. +# Minimal configuration +export OS_AUTH_TYPE=password export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} - -echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools." -export OS_TENANT_NAME=$OS_PROJECT_NAME - -# In addition to the owning entity (project), nova stores the entity performing -# the action as the **user**. 
export OS_USERNAME=${OS_USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. -# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs -# or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} - -# Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} # Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION @@ -73,30 +55,14 @@ else GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} fi -# Identity API version -export OS_IDENTITY_API_VERSION=3 - -# Ask keystoneauth1 to use keystone -export OS_AUTH_TYPE=password - -# Authenticating against an OpenStack cloud using Keystone returns a **Token** -# and **Service Catalog**. The catalog contains the endpoints for all services -# the user/project has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the version 3 *identity api*. -# - # If you don't have a working .stackenv, this is the backup position KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} -# Currently, in order to use openstackclient with Identity API v3, -# we need to set the domain which the user and project belong to. -if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} - export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} -fi +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} # Set OS_CACERT to a default CA certificate chain if it exists. if [[ ! -v OS_CACERT ]] ; then @@ -106,8 +72,3 @@ if [[ ! -v OS_CACERT ]] ; then export OS_CACERT=$DEFAULT_OS_CACERT fi fi - -# Currently cinderclient needs you to specify the *volume api* version. This -# needs to match the config of your catalog returned by Keystone. 
-export CINDER_VERSION=${CINDER_VERSION:-3} -export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} diff --git a/playbooks/post.yaml b/playbooks/post.yaml index d8d5f6833c..0047d78ea5 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -17,6 +17,12 @@ dest: "{{ stage_dir }}/verify_tempest_conf.log" state: hard when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes roles: - export-devstack-journal - apache-logs-conf diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml index d7e4670a80..68d5254251 100644 --- a/playbooks/tox/pre.yaml +++ b/playbooks/tox/pre.yaml @@ -5,4 +5,10 @@ bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - - ensure-tox + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # branches jobs. 
We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml index f9bb0f7851..51a11b60bc 100644 --- a/roles/capture-performance-data/tasks/main.yaml +++ b/roles/capture-performance-data/tasks/main.yaml @@ -3,7 +3,9 @@ executable: /bin/bash cmd: | source {{ devstack_conf_dir }}/stackrc - python3 {{ devstack_conf_dir }}/tools/get-stats.py \ + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ --db-user="$DATABASE_USER" \ --db-pass="$DATABASE_PASSWORD" \ --db-host="$DATABASE_HOST" \ diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst index c28412457a..1376f63bfc 100644 --- a/roles/capture-system-logs/README.rst +++ b/roles/capture-system-logs/README.rst @@ -9,6 +9,7 @@ Stage a number of different logs / reports: - coredumps - dns resolver - listen53 +- services - unbound.log - deprecation messages diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index 905806d529..4b5ec4836b 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -4,7 +4,13 @@ executable: /bin/bash cmd: | sudo iptables-save > {{ stage_dir }}/iptables.txt - df -h > {{ stage_dir }}/df.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. 
+ mount > {{ stage_dir }}/mount.txt for py_ver in 2 3; do if [[ `which python${py_ver}` ]]; then @@ -19,6 +25,9 @@ rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt fi + # Services status + sudo systemctl status --all > services.txt 2>/dev/null + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU # failed to start due to denials from SELinux — useful for CentOS # and Fedora machines. For Ubuntu (which runs AppArmor), DevStack diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst index 400a8da222..3bddf5ea60 100644 --- a/roles/devstack-ipv6-only-deployments-verification/README.rst +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -1,10 +1,10 @@ -Verify the IPv6-only deployments +Verify all addresses in IPv6-only deployments This role needs to be invoked from a playbook that -run tests. This role verifies the IPv6 setting on -devstack side and devstack deploy services on IPv6. -This role is invoked before tests are run so that -if any missing IPv6 setting or deployments can fail +runs tests. This role verifies the IPv6 settings on the +devstack side and that devstack deploys with all addresses +being IPv6. This role is invoked before tests are run so that +if there is any missing IPv6 setting, deployments can fail the job early. 
diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index 2b8ae01a62..b8ee7e35a7 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -4,6 +4,7 @@ when: inventory_hostname == 'controller' - name: Setup devstack on sub-nodes + any_errors_fatal: true block: - name: Distribute the build sshkey for the user "stack" diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index 294c29cd29..cb7c6e3af8 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -43,9 +43,9 @@ base_branch={{ devstack_sources_branch }} if git branch -a | grep "$base_branch" > /dev/null ; then git checkout $base_branch - elif [[ "$base_branch" == stable/* ]]; then + elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then # Look for an eol tag for the stable branch. - eol_tag=${base_branch#stable/}-eol + eol_tag="${base_branch#*/}-eol" if git tag -l |grep $eol_tag >/dev/null; then git checkout $eol_tag git reset --hard $eol_tag diff --git a/samples/local.sh b/samples/local.sh index a1c5c8143b..7e6ae70ad4 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -31,7 +31,7 @@ if is_service_enabled nova; then # ``demo``) # Get OpenStack user auth - source $TOP_DIR/openrc + export OS_CLOUD=devstack # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do diff --git a/stack.sh b/stack.sh index 6e9ced985e..c6d37611c9 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** # (7 or newer) machine. 
(It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="trixie|bookworm|noble|rhel9|rhel10" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -280,13 +280,6 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh -# TODO(wxy): Currently some base packages are not installed by default in -# openEuler. Remove the code below once the packaged are installed by default -# in the future. -if [[ $DISTRO == "openEuler-20.03" ]]; then - install_package hostname -fi - # Configure Distro Repositories # ----------------------------- @@ -308,17 +301,19 @@ function _install_epel { } function _install_rdo { - if [[ $DISTRO == "rhel8" ]]; then + if [[ $DISTRO =~ "rhel" ]]; then + VERSION=${DISTRO:4:2} + rdo_release=${TARGET_BRANCH#*/} if [[ "$TARGET_BRANCH" == "master" ]]; then - # rdo-release.el8.rpm points to latest RDO release, use that for master - sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + # adding delorean-deps repo to provide current master rpms + sudo wget https://trunk.rdoproject.org/centos${VERSION}-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") - sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then + sudo dnf -y 
install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi fi - elif [[ $DISTRO == "rhel9" ]]; then - sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo fi sudo dnf -y update } @@ -341,7 +336,9 @@ fi # Destination path for devstack logs if [[ -n ${LOGDIR:-} ]]; then - mkdir -p $LOGDIR + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR fi # Destination path for service data @@ -359,7 +356,7 @@ async_init # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then +if ! grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi @@ -368,39 +365,38 @@ fi # to speed things up SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) -if [[ $DISTRO == "rhel8" ]]; then - # If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI - # node, where EPEL is installed (but disabled) and already - # pointing at our internal mirror - if [[ -f /etc/ci/mirror_info.sh ]]; then - SKIP_EPEL_INSTALL=True - sudo dnf config-manager --set-enabled epel - fi - - # PowerTools repo provides libyaml-devel required by devstack itself and - # EPEL packages assume that the PowerTools repository is enable. - sudo dnf config-manager --set-enabled PowerTools - - # CentOS 8.3 changed the repository name to lower case. - sudo dnf config-manager --set-enabled powertools - - if [[ ${SKIP_EPEL_INSTALL} != True ]]; then - _install_epel - fi - # Along with EPEL, CentOS (and a-likes) require some packages only - # available in RDO repositories (e.g. OVS, or later versions of - # kvm) to run. 
+if [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository + sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms + # rabbitmq and other packages are provided by RDO repositories. _install_rdo - # NOTE(cgoncalves): workaround RHBZ#1154272 - # dnf fails for non-privileged users when expired_repos.json doesn't exist. - # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272 - # Patch: https://github.com/rpm-software-management/dnf/pull/1448 - echo "[]" | sudo tee /var/cache/dnf/expired_repos.json -elif [[ $DISTRO == "rhel9" ]]; then + # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl, + # it triggers a conflict when devstack wants to install "curl". + # Swap curl-minimal with curl. + if is_package_installed curl-minimal; then + sudo dnf swap -y curl-minimal curl + fi +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository sudo dnf config-manager --set-enabled crb # rabbitmq and other packages are provided by RDO repositories. _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problem in openEuler. We should fix it first. Some required + # package/action runs before fixup script. So we can't fix there. + # + # 1. the hostname package is not installed by default + # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. + # 4. 
Ensure wget installation before use + install_package hostname openstack-release-wallaby wget + uninstall_package python3-pip + + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo fi # Ensure python is installed @@ -585,6 +581,12 @@ rm -f $SSL_BUNDLE_FILE source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory intensive operation like cloning repos or unpacking packages. +tune_host + # Configure Projects # ================== @@ -615,6 +617,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 source $TOP_DIR/lib/os-vif @@ -797,6 +800,20 @@ fi source $TOP_DIR/tools/fixup_stuff.sh fixup_all +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin + + setup_devstack_virtualenv +fi + # Install subunit for the subunit output stream pip_install -U os-testr @@ -964,6 +981,11 @@ if is_service_enabled tls-proxy; then fix_system_ca_bundle_path fi +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, 
cinder, and nova + init_os_brick +fi + # Extras Install # -------------- @@ -976,6 +998,9 @@ if use_library_from_git "python-openstackclient"; then setup_dev_lib "python-openstackclient" else pip_install_gr python-openstackclient + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server + fi fi # Installs alias for osc so that we can collect timing for all @@ -1040,22 +1065,6 @@ fi # Save configuration values save_stackenv $LINENO -# Kernel Samepage Merging (KSM) -# ----------------------------- - -# Processes that mark their memory as mergeable can share identical memory -# pages if KSM is enabled. This is particularly useful for nova + libvirt -# backends but any other setup that marks its memory as mergeable can take -# advantage. The drawback is there is higher cpu load; however, we tend to -# be memory bound not cpu bound so enable KSM by default but allow people -# to opt out if the CPU time is more important to them. - -if [[ $ENABLE_KSM == "True" ]] ; then - if [[ -f /sys/kernel/mm/ksm/run ]] ; then - sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run" - fi -fi - # Start Services # ============== @@ -1066,6 +1075,12 @@ fi # A better kind of sysstat, with the top process per time slice start_dstat +if is_service_enabled atop; then + configure_atop + install_atop + start_atop +fi + # Run a background tcpdump for debugging # Note: must set TCPDUMP_ARGS with the enabled service if is_service_enabled tcpdump; then @@ -1159,7 +1174,8 @@ fi # ---- if is_service_enabled q-dhcp; then - # Delete traces of nova networks from prior runs + # TODO(frickler): These are remnants from n-net, check which parts are really + # still needed for Neutron. 
# Do not kill any dnsmasq instance spawned by NetworkManager netman_pid=$(pidof NetworkManager || true) if [ -z "$netman_pid" ]; then @@ -1219,12 +1235,7 @@ if is_service_enabled nova; then echo_summary "Configuring Nova" init_nova - # Additional Nova configuration that is dependent on other services - # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If - # not, remove the if here - if is_service_enabled neutron; then - async_runfunc configure_neutron_nova - fi + async_runfunc configure_neutron_nova fi @@ -1284,10 +1295,7 @@ if is_service_enabled ovn-controller ovn-controller-vtep; then start_ovn_services fi -if is_service_enabled neutron-api; then - echo_summary "Starting Neutron" - start_neutron_api -elif is_service_enabled q-svc; then +if is_service_enabled q-svc neutron-api; then echo_summary "Starting Neutron" configure_neutron_after_post_config start_neutron_service_and_check @@ -1304,7 +1312,7 @@ if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements -if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then +if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then echo_summary "Creating initial neutron network elements" # Here's where plugins can wire up their own networks instead # of the code in lib/neutron_plugins/services/l3 @@ -1512,6 +1520,19 @@ async_cleanup time_totals async_print_timing +if is_service_enabled mysql; then + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then + echo "" + echo "" + echo "Post-stack database query stats:" + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'SELECT * FROM queries' -t 2>/dev/null + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'DELETE FROM queries' 2>/dev/null + fi +fi + + # Using the cloud # =============== diff --git a/stackrc b/stackrc index 0c76de0531..ac9b7ee8ce 100644 --- 
a/stackrc +++ b/stackrc @@ -75,7 +75,7 @@ if ! isset ENABLED_SERVICES ; then # OVN ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server # Neutron - ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent + ENABLED_SERVICES+=,q-svc,q-ovn-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services @@ -85,7 +85,7 @@ fi # Global toggle for enabling services under mod_wsgi. If this is set to # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of # deployment, will be deployed under Apache. If this is set to ``False`` all -# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``) +# services will rely on the local toggle variable. ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable @@ -121,24 +121,11 @@ else SYSTEMCTL="sudo systemctl" fi - -# Whether or not to enable Kernel Samepage Merging (KSM) if available. -# This allows programs that mark their memory as mergeable to share -# memory pages if they are identical. This is particularly useful with -# libvirt backends. This reduces memory usage at the cost of CPU overhead -# to scan memory. We default to enabling it because we tend to be more -# memory constrained than CPU bound. -ENABLE_KSM=$(trueorfalse True ENABLE_KSM) - # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi -# Control whether Python 3 should be used at all. -# TODO(frickler): Drop this when all consumers are fixed -export USE_PYTHON3=True - # Adding the specific version of Python 3 to this variable will install # the app using that version of the interpreter instead of just 3. 
_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" @@ -146,7 +133,7 @@ export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}} # Create a virtualenv with this # Use the built-in venv to avoid more dependencies -export VIRTUALENV_CMD="python3 -m venv" +export VIRTUALENV_CMD="python$PYTHON3_VERSION -m venv" # Default for log coloring is based on interactive-or-not. # Baseline assumption is that non-interactive invocations are for CI, @@ -175,14 +162,27 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version -# TODO(frickler): Drop this when plugins no longer need it -IDENTITY_API_VERSION=3 - # Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides # each services ${SERVICE}_ENFORCE_SCOPE variables ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +fi + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. 
@@ -190,8 +190,9 @@ ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) USE_VENV=$(trueorfalse False USE_VENV) # Add packages that need to be installed into a venv but are not in any -# requirmenets files here, in a comma-separated list -ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} +# requirements files here, in a comma-separated list. +# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) @@ -201,6 +202,11 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # performance_schema that are of interest to us MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is # in the format for timeout(1); @@ -217,6 +223,9 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # proxy uwsgi in front of it, or "mod_wsgi", which runs in # apache. mod_wsgi is deprecated, don't use it. WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." +fi # Repositories # ------------ @@ -243,7 +252,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. 
-DEVSTACK_SERIES="zed" +DEVSTACK_SERIES="2026.2" ############## # @@ -298,6 +307,9 @@ TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images ############## # @@ -383,6 +395,10 @@ GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + # helpful state machines GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} @@ -566,28 +582,6 @@ GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} GITDIR["os-ken"]=$DEST/os-ken -################## -# -# TripleO / Heat Agent Components -# -################## - -# run-parts script required by os-refresh-config -DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} -DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH} - -# os-apply-config configuration template tool -OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} -OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-collect-config configuration agent -OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git} -OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-refresh-config configuration run-parts tool -ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} -ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH} - 
################# # @@ -625,12 +619,13 @@ case "$VIRT_DRIVER" in LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then - # The groups change with newer libvirt. Older Ubuntu used - # 'libvirtd', but now uses libvirt like Debian. Do a quick check - # to see if libvirtd group already exists to handle grenade's case. - LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true) - LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt} + LIBVIRT_GROUP=libvirt else LIBVIRT_GROUP=libvirtd fi @@ -657,20 +652,19 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. 
# example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -687,11 +681,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - 
IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) @@ -702,7 +696,7 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then # Use the same as the default for libvirt DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac DOWNLOAD_DEFAULT_IMAGES=False fi @@ -716,12 +710,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.3.12} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"} -# etcd v3.2.x doesn't have anything for s390x -ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" @@ -733,15 +726,8 @@ elif is_arch "ppc64le"; then ETCD_ARCH="ppc64le" ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} elif is_arch 
"s390x"; then - # An etcd3 binary for s390x is not available on github like it is - # for other arches. Only continue if a custom download URL was - # provided. - if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then - ETCD_ARCH="s390x" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} - else - exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided." - fi + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi @@ -800,7 +786,7 @@ NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} # Service graceful shutdown timeout -WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} # Common Configuration # -------------------- @@ -877,7 +863,31 @@ SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} # This is either 127.0.0.1 for IPv4 or ::1 for IPv6 SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} -REGION_NAME=${REGION_NAME:-RegionOne} +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. 
+TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} # Configure services to use syslog instead of writing to individual log files SYSLOG=$(trueorfalse False SYSLOG) diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 6ed1647f34..fd3896d6ba 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -44,6 +44,12 @@ empty = multi = foo1 multi = foo2 +[fff] +ampersand = + +[ggg] +backslash = + [key_with_spaces] rgw special key = something @@ -85,7 +91,7 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \ +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff ggg key_with_spaces \ del_separate_options del_same_option del_missing_option \ del_missing_option_multi del_no_options" @@ -124,6 +130,23 @@ iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" +# Test with ampersand in values +for i in `seq 3`; do + iniset ${TEST_INI} fff ampersand '&y' +done +VAL=$(iniget ${TEST_INI} fff ampersand) +assert_equal "$VAL" "&y" "iniset ampersands in option" + +# Test with backslash in value +iniset ${TEST_INI} ggg backslash 'foo\bar' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar' "iniset backslash in value" + +# Test with both ampersand and backslash +iniset ${TEST_INI} ggg backslash 'foo\bar&baz' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar&baz' "iniset ampersand and backslash in value" + # test empty option if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 839e3a1328..9552c93c4f 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -45,7 +45,7 @@ ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" 
ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" -ALL_LIBS+=" oslo.limit" +ALL_LIBS+=" oslo.limit etcd3gw" # Generate the above list with # echo ${!GITREPO[@]} diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 087aaf468b..30479f245a 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -137,6 +137,9 @@ foo=bar [some] random=config +[[test12|run_tests.sh/test.conf]] +foo=bar + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -389,13 +392,12 @@ EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e -echo -n "merge_config_group test10 not directory: " +echo -n "merge_config_group test10 create directory: " set +e -# function is expected to fail and exit, running it -# in a subprocess to let this script proceed -(merge_config_group test.conf test10) +STACK_USER=$(id -u -n) +merge_config_group test.conf test10 VAL=$? -EXPECT_VAL=255 +EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e @@ -414,9 +416,21 @@ random = config non = sense' check_result "$VAL" "$EXPECT_VAL" +echo -n "merge_config_group test12 directory as file: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_group test.conf test12) +VAL=$? +EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ test-multi-sections.conf test-same.conf rm -rf test-etc +rm -rf does-not-exist-dir + diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh index bfc2a1954f..f221c821a0 100755 --- a/tests/test_package_ordering.sh +++ b/tests/test_package_ordering.sh @@ -8,7 +8,7 @@ TOP=$(cd $(dirname "$0")/.. 
&& pwd) source $TOP/tests/unittest.sh export LC_ALL=en_US.UTF-8 -PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f) +PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms -type f) TMPDIR=$(mktemp -d) diff --git a/tools/build_venv.sh b/tools/build_venv.sh index cfa39a82e0..a439163b5d 100755 --- a/tools/build_venv.sh +++ b/tools/build_venv.sh @@ -38,7 +38,7 @@ if [[ -z "$TOP_DIR" ]]; then fi # Build new venv -virtualenv $VENV_DEST +python$PYTHON3_VERSION -m venv --system-site-packages $VENV_DEST # Install modern pip PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 919cacb036..cb8d7aa328 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -44,6 +44,15 @@ fi if ! getent passwd $STACK_USER >/dev/null; then echo "Creating a user called $STACK_USER" useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi fi echo "Giving stack user passwordless sudo privileges" diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..86e5529c97 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,121 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? 
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. It gets loaded by virtue of plugin= +# being in the database connection URL, which gives us an opportunity +# to hook the engines that get created. +# +# We opportunistically spawn a thread, which we feed "hits" to over a +# queue, and which occasionally writes those hits to a special +# database called 'stats'. We access that database with the same user, +# pass, and host as the main connection URL for simplicity. + + +class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.db_name = url.database + LOG.info('Registered counter for database %s' % self.db_name) + new_url = sqlalchemy.engine.URL.create(url.drivername, + url.username, + url.password, + url.host, + url.port, + 'stats') + + self.engine = sqlalchemy.create_engine(new_url) + self.queue = queue.Queue() + self.thread = None + + def update_url(self, url): + return url.difference_update_query(["dbcounter"]) + + def engine_created(self, engine): + """Hook the engine creation process. + + This is the plug point for the sqlalchemy plugin. Using + plugin=$this in the URL causes this method to be called when + the engine is created, giving us a chance to hook it below. + """ + event.listen(engine, "before_cursor_execute", self._log_event) + + def ensure_writer_thread(self): + self.thread = threading.Thread(target=self.stat_writer, daemon=True) + self.thread.start() + + def _log_event(self, conn, cursor, statement, parameters, context, + executemany): + """Queue a "hit" for this operation to be recorded. + + Attepts to determine the operation by the first word of the + statement, or 'OTHER' if it cannot be determined. + """ + + # Start our thread if not running. 
If we were forked after the + # engine was created and this plugin was associated, our + # writer thread is gone, so respawn. + if not self.thread or not self.thread.is_alive(): + self.ensure_writer_thread() + + try: + op = statement.strip().split(' ', 1)[0] or 'OTHER' + except Exception: + op = 'OTHER' + + self.queue.put((self.db_name, op)) + + def do_incr(self, db, op, count): + """Increment the counter for (db,op) by count.""" + + query = sqlalchemy.text('INSERT INTO queries (db, op, count) ' + ' VALUES (:db, :op, :count) ' + ' ON DUPLICATE KEY UPDATE count=count+:count') + try: + with self.engine.begin() as conn: + r = conn.execute(query, {'db': db, 'op': op, 'count': count}) + except Exception as e: + LOG.error('Failed to account for access to database %r: %s', + db, e) + + def stat_writer(self): + """Consume messages from the queue and write them in batches. + + This reads "hists" from from a queue fed by _log_event() and + writes (db,op)+=count stats to the database after ten seconds + of no activity to avoid triggering a write for every SELECT + call. Write no less often than every sixty seconds to avoid being + starved by constant activity. 
+ """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + last = time.time() + while time.time() - last < 60: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..12300bf619 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +py_modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f24ac40ad5..9e2818f2cc 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -36,6 +36,12 @@ function fixup_fedora { # Disable selinux to avoid configuring to allow Apache access # to Horizon files (LP#1175444) if selinuxenabled; then + #persit selinux config across reboots + cat << EOF | sudo tee /etc/selinux/config +SELINUX=permissive +SELINUXTYPE=targeted +EOF + # then disable at runtime sudo setenforce 0 fi @@ -78,63 +84,11 @@ function fixup_fedora { # python3-setuptools RPM are deleted, it breaks some tools such as semanage # (used in diskimage-builder) that use the -s flag of the python # interpreter, enforcing the use of the packages from /usr/lib. - # Importing setuptools/pkg_resources in a such environment fails. + # Importing setuptools in a such environment fails. # Enforce the package re-installation to fix those applications. if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools fi - # Workaround CentOS 8-stream iputils and systemd Bug - # https://bugzilla.redhat.com/show_bug.cgi?id=2037807 - if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then - sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' - fi -} - -function fixup_suse { - if ! 
is_suse; then - return - fi - - # Deactivate and disable apparmor profiles in openSUSE and SLE - # distros to avoid issues with haproxy and dnsmasq. In newer - # releases, systemctl stop apparmor is actually a no-op, so we - # have to use aa-teardown to make sure we've deactivated the - # profiles: - # - # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343 - # https://gitlab.com/apparmor/apparmor/merge_requests/81 - # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1 - if sudo systemctl is-active -q apparmor; then - sudo systemctl stop apparmor - fi - if [ -x /usr/sbin/aa-teardown ]; then - sudo /usr/sbin/aa-teardown - fi - if sudo systemctl is-enabled -q apparmor; then - sudo systemctl disable apparmor - fi - - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info - sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info - - # Ensure trusted CA certificates are up to date - # See https://bugzilla.suse.com/show_bug.cgi?id=1154871 - # May be removed once a new opensuse-15 image is available in nodepool - sudo zypper up -y p11-kit ca-certificates-mozilla -} - -function fixup_ovn_centos { - if [[ $os_VENDOR != "CentOS" ]]; then - return - fi - # OVN packages are part of this release for CentOS - yum_install centos-release-openstack-victoria } function fixup_ubuntu { @@ -153,32 +107,7 @@ function fixup_ubuntu { sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } -function fixup_openeuler { - if ! 
is_openeuler; then - return - fi - - if is_arch "x86_64"; then - arch="x86_64" - elif is_arch "aarch64"; then - arch="aarch64" - fi - - # Some packages' version in openEuler are too old, use the newer ones we - # provide in oepkg. (oepkg is an openEuler third part yum repo which is - # endorsed by openEuler community) - (echo '[openstack-ci]' - echo 'name=openstack' - echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/' - echo 'enabled=1' - echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null - - yum_install liberasurecode-devel -} - function fixup_all { fixup_ubuntu fixup_fedora - fixup_suse - fixup_openeuler } diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 1cacd06bf8..bc28515a26 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -73,8 +73,11 @@ def has_devstack_plugin(session, proj): s = requests.Session() # sometimes gitea gives us a 500 error; retry sanely # https://stackoverflow.com/a/35636367 +# We need to disable raise_on_status because if any repo endup with 500 then +# propose-updates job which run this script will fail. retries = Retry(total=3, backoff_factor=1, - status_forcelist=[ 500 ]) + status_forcelist=[ 500 ], + raise_on_status=False) s.mount('https://', HTTPAdapter(max_retries=retries)) found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh index 3307943df9..45a926392e 100755 --- a/tools/generate-devstack-plugins-list.sh +++ b/tools/generate-devstack-plugins-list.sh @@ -38,6 +38,17 @@ # current working directory, it will be prepended or appended to # the generated reStructuredText plugins table respectively. 
+# Setup virtual environment +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +VENV_DIR="${SCRIPT_DIR}/.venv" + +if [[ ! -d "${VENV_DIR}" ]]; then + python3 -m venv "${VENV_DIR}" +fi + +source "${VENV_DIR}/bin/activate" +pip install -q -r "${SCRIPT_DIR}/requirements.txt" + # Print the title underline for a RST table. Argument is the length # of the first column, second column is assumed to be "URL" function title_underline { @@ -54,7 +65,7 @@ if [[ -r data/devstack-plugins-registry.header ]]; then cat data/devstack-plugins-registry.header fi -sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py) +sorted_plugins=$("${VENV_DIR}/bin/python3" tools/generate-devstack-plugins-list.py) # find the length of the name column & pad name_col_len=$(echo "${sorted_plugins}" | wc -L) diff --git a/tools/get-stats.py b/tools/get-stats.py index 670e723e82..b958af61b2 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -1,10 +1,12 @@ #!/usr/bin/python3 import argparse +import csv import datetime import glob import itertools import json +import logging import os import re import socket @@ -25,6 +27,8 @@ print('No pymysql, database information will not be included', file=sys.stderr) +LOG = logging.getLogger('perf') + # https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion @@ -50,7 +54,8 @@ def get_service_stats(service): def get_services_stats(): services = [os.path.basename(s) for s in - glob.glob('/etc/systemd/system/devstack@*.service')] + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] return [dict(service=service, **get_service_stats(service)) for service in services] @@ -82,14 +87,20 @@ def proc_matches(proc): def get_db_stats(host, user, passwd): dbs = [] - db = pymysql.connect(host=host, user=user, password=passwd, - database='performance_schema', - cursorclass=pymysql.cursors.DictCursor) + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + 
cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack failed', + file=sys.stderr) + return [] + raise + with db: with db.cursor() as cur: - cur.execute( - 'SELECT COUNT(*) AS queries,current_schema AS db FROM ' - 'events_statements_history_long GROUP BY current_schema') + cur.execute('SELECT db,op,count FROM queries') for row in cur: dbs.append({k: tryint(v) for k, v in row.items()}) return dbs @@ -97,26 +108,65 @@ def get_db_stats(host, user, passwd): def get_http_stats_for_log(logfile): stats = {} - for line in open(logfile).readlines(): - m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" ([0-9]{3}) ([0-9]+)', - line) - if m: - method = m.group(1) - path = m.group(2) - status = m.group(4) - size = int(m.group(5)) - - try: - service, rest = path.split('/', 1) - except ValueError: - # Root calls like "GET /identity" - service = path - rest = '' - - stats.setdefault(service, {'largest': 0}) - stats[service].setdefault(method, 0) - stats[service][method] += 1 - stats[service]['largest'] = max(stats[service]['largest'], size) + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + return [] + + # Tempest's User-Agent is unchanged, but client libraries and + # inter-service API calls use proper strings. So assume + # 'python-urllib' is tempest so we can tell it apart. 
+ if 'python-urllib' in fields['agent'].lower(): + agent = 'tempest' + else: + agent = fields['agent'].split(' ')[0] + if agent.startswith('python-'): + agent = agent.replace('python-', '') + if '/' in agent: + agent = agent.split('/')[0] + + if agent in ignore_agents: + continue + + try: + service, rest = url.strip('/').split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = url.strip('/') + rest = '' + + if not service.isalpha(): + ignored_services.add(service) + continue + + method_key = '%s-%s' % (agent, method) + try: + length = int(fields['length']) + except ValueError: + LOG.warning('[%s] Failed to parse length %r from line %r' % ( + logfile, fields['length'], line)) + length = 0 + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method_key, 0) + stats[service][method_key] += 1 + stats[service]['largest'] = max(stats[service]['largest'], + length) + + if ignored_services: + LOG.warning('Ignored services: %s' % ','.join( + sorted(ignored_services))) # Flatten this for ES return [{'service': service, 'log': os.path.basename(logfile), @@ -133,6 +183,7 @@ def get_report_info(): return { 'timestamp': datetime.datetime.now().isoformat(), 'hostname': socket.gethostname(), + 'version': 2, } @@ -154,6 +205,8 @@ def get_report_info(): '(default is %s)' % ','.join(process_defaults))) args = parser.parse_args() + logging.basicConfig(level=logging.WARNING) + data = { 'services': get_services_stats(), 'db': pymysql and args.db_pass and get_db_stats(args.db_host, diff --git a/tools/install_pip.sh b/tools/install_pip.sh index e9c52eacb7..027693fc0a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -38,7 +38,6 @@ FILES=$TOP_DIR/files # [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} 
-PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"} GetDistro echo "Distro: $DISTRO" @@ -57,14 +56,8 @@ function get_versions { function install_get_pip { - if [[ "$PYTHON3_VERSION" = "3.6" ]]; then - _pip_url=$PIP_GET_PIP36_URL - _local_pip="$FILES/$(basename $_pip_url)-py36" - else - _pip_url=$PIP_GET_PIP_URL - _local_pip="$FILES/$(basename $_pip_url)" - fi - + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" # If get-pip.py isn't python, delete it. This was probably an # outage on the server. @@ -127,7 +120,7 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then # get-pip.py will not install over the python3-pip package in # Fedora 34 any more. # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 @@ -139,15 +132,18 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # recent enough anyway. This is included via rpms/general : # Simply fall through elif is_ubuntu; then - : # pip on Ubuntu 20.04 is new enough, too + # pip on Ubuntu 20.04 and higher is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt else install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. + pip_install_gr setuptools fi set -x -# Note setuptools is part of requirements.txt and we want to make sure -# we obey any versioning as described there. 
-pip_install_gr setuptools get_versions diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index a7c03d26cd..bb470b2927 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -74,13 +74,13 @@ install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp else exit_distro_not_supported "rsyslog-relp installation" fi fi +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) # Mark end of run # --------------- diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index 6c36534f01..2f404c26fb 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -14,7 +14,12 @@ set -o errexit -PYTHON=${PYTHON:-python3} +# TODO(frickler): make this use stackrc variables +if [ -x /opt/stack/data/venv/bin/python ]; then + PYTHON=/opt/stack/data/venv/bin/python +else + PYTHON=${PYTHON:-python3} +fi # time to sleep between checks SLEEP_TIME=20 diff --git a/tools/mlock_report.py b/tools/mlock_report.py index 1b081bbe6f..8cbda15895 100644 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -6,7 +6,7 @@ LCK_SUMMARY_REGEX = re.compile( - "^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) + r"^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) def main(): diff --git a/tools/outfilter.py b/tools/outfilter.py index e910f79ff2..3955d39794 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -26,8 +26,8 @@ import re import sys -IGNORE_LINES = re.compile('(set \+o|xtrace)') -HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') +IGNORE_LINES = re.compile(r'(set \+o|xtrace)') +HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') def get_options(): @@ -76,7 +76,8 @@ def main(): # with zuulv3 native jobs and ansible capture it may become # clearer what to do if 
HAS_DATE.search(line) is None: - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc).replace( + tzinfo=None) ts_line = ("%s | %s" % ( now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], line)) @@ -89,13 +90,10 @@ def main(): if outfile: # We've opened outfile as a binary file to get the - # non-buffered behaviour. on python3, sys.stdin was + # non-buffered behaviour. on python3, sys.stdin was # opened with the system encoding and made the line into # utf-8, so write the logfile out in utf-8 bytes. - if sys.version_info < (3,): - outfile.write(ts_line) - else: - outfile.write(ts_line.encode('utf-8')) + outfile.write(ts_line.encode('utf-8')) outfile.flush() diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index 73fe3f3bdf..2b65fd0fb3 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -21,7 +21,7 @@ set -o pipefail TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -# This *must* be run as the admin tenant +# This *must* be run as the admin project source $TOP_DIR/openrc admin admin function usage { @@ -29,8 +29,9 @@ function usage { ping_neutron.sh [ping args] This provides a wrapper to ping neutron guests that are on isolated -tenant networks that the caller can't normally reach. It does so by -creating a network namespace probe. +project networks that the caller can't normally reach. It does so by +using either the DHCP or Metadata network namespace to support both +ML2/OVS and OVN. It takes arguments like ping, except the first arg must be the network name. @@ -44,6 +45,12 @@ EOF exit 1 } +# BUG: with duplicate network names, this fails pretty hard since it +# will just pick the first match. 
+function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}' +} + NET_NAME=$1 if [[ -z "$NET_NAME" ]]; then @@ -53,12 +60,11 @@ fi REMAINING_ARGS="${@:2}" -# BUG: with duplicate network names, this fails pretty hard. -NET_ID=$(openstack network show -f value -c id "$NET_NAME") -PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) +NET_ID=`_get_net_id $NET_NAME` +NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1) # This runs a command inside the specific netns -NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" +NET_NS_CMD="ip netns exec $NET_NS" PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" echo "Running $PING_CMD" diff --git a/tools/requirements.txt b/tools/requirements.txt new file mode 100644 index 0000000000..f2293605cf --- /dev/null +++ b/tools/requirements.txt @@ -0,0 +1 @@ +requests diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 74dcdb2a07..87312d9469 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -14,14 +14,14 @@ # Update the clouds.yaml file. - import argparse import os.path +import sys import yaml -class UpdateCloudsYaml(object): +class UpdateCloudsYaml: def __init__(self, args): if args.file: self._clouds_path = args.file @@ -35,25 +35,26 @@ def __init__(self, args): self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, - 'identity_api_version': args.os_identity_api_version, - 'volume_api_version': args.os_volume_api_version, 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, + 'user_domain_id': 'default', 'password': args.os_password, }, } + if args.os_project_name and args.os_system_scope: print( - "WARNING: os_project_name and os_system_scope were both" - " given. 
os_system_scope will take priority.") - if args.os_project_name and not args.os_system_scope: + "WARNING: os_project_name and os_system_scope were both " + "given. os_system_scope will take priority." + ) + + if args.os_system_scope: # system-scoped + self._cloud_data['auth']['system_scope'] = args.os_system_scope + elif args.os_project_name: # project-scoped self._cloud_data['auth']['project_name'] = args.os_project_name - if args.os_identity_api_version == '3' and not args.os_system_scope: - self._cloud_data['auth']['user_domain_id'] = 'default' self._cloud_data['auth']['project_domain_id'] = 'default' - if args.os_system_scope: - self._cloud_data['auth']['system_scope'] = args.os_system_scope + if args.os_cacert: self._cloud_data['cacert'] = args.os_cacert @@ -89,8 +90,6 @@ def main(): parser.add_argument('--file') parser.add_argument('--os-cloud', required=True) parser.add_argument('--os-region-name', default='RegionOne') - parser.add_argument('--os-identity-api-version', default='3') - parser.add_argument('--os-volume-api-version', default='3') parser.add_argument('--os-cacert') parser.add_argument('--os-auth-url', required=True) parser.add_argument('--os-username', required=True) diff --git a/tools/verify-ipv6-address.py b/tools/verify-ipv6-address.py new file mode 100644 index 0000000000..dc18fa6d8a --- /dev/null +++ b/tools/verify-ipv6-address.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import argparse +import ipaddress +import sys + +def main(): + parser = argparse.ArgumentParser( + description="Check if a given string is a valid IPv6 address.", + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( + "address", + help=( + "The IPv6 address string to validate.\n" + "Examples:\n" + " 2001:0db8:85a3:0000:0000:8a2e:0370:7334\n" + " 2001:db8::1\n" + " ::1\n" + " fe80::1%eth0 (scope IDs are handled)" + ), + ) + args = parser.parse_args() + + try: + # try to create a IPv6Address: if we fail to parse or get an + # IPv4Address then die + 
ip_obj = ipaddress.ip_address(args.address.strip('[]')) + if isinstance(ip_obj, ipaddress.IPv6Address): + sys.exit(0) + else: + sys.exit(1) + except ValueError: + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred during validation: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh index 2596395165..a1acecbb3f 100755 --- a/tools/verify-ipv6-only-deployments.sh +++ b/tools/verify-ipv6-only-deployments.sh @@ -23,37 +23,42 @@ function verify_devstack_ipv6_setting { _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) local _service_local_host='' _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + local _tunnel_endpoint_ip='' + _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d []) if [[ "$SERVICE_IP_VERSION" != 6 ]]; then echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." exit 1 fi - is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') - if [[ "$is_service_host_ipv6" != "True" ]]; then - echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then + echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." exit 1 fi - is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') - if [[ "$is_host_ipv6" != "True" ]]; then - echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address." + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_host"; then + echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." 
exit 1 fi - is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') - if [[ "$is_service_listen_address" != "True" ]]; then - echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address." + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_host_ipv6"; then + echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') - if [[ "$is_service_local_host" != "True" ]]; then - echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_listen_address"; then + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_local_host"; then + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_tunnel_endpoint_ip"; then + echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." 
exit 1 fi echo "Devstack is properly configured with IPv6" - echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST + echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP } function sanity_check_system_ipv6_enabled { - system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())') - if [[ $system_ipv6_enabled != "True" ]]; then + if [ ! -f "/proc/sys/net/ipv6/conf/default/disable_ipv6" ] || [ "$(cat /proc/sys/net/ipv6/conf/default/disable_ipv6)" -ne "0" ]; then echo "IPv6 is disabled in system" exit 1 fi @@ -67,12 +72,10 @@ function verify_service_listen_address_is_ipv6 { for endpoint in ${endpoints}; do local endpoint_address='' endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') - endpoint_address=$(echo $endpoint_address | tr -d []) - local is_endpoint_ipv6='' - is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') - if [[ "$is_endpoint_ipv6" != "True" ]]; then + endpoint_address=$(echo $endpoint_address | tr -d '[]') + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$endpoint_address"; then all_ipv6=False - echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address." + echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." 
continue fi endpoints_verified=True @@ -80,7 +83,7 @@ function verify_service_listen_address_is_ipv6 { if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then exit 1 fi - echo "All services deployed by devstack is on IPv6 endpoints" + echo "All services deployed by devstack are on IPv6 endpoints" echo $endpoints } diff --git a/tools/worlddump.py b/tools/worlddump.py index e2921737db..26ced3f653 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -19,7 +19,6 @@ import argparse import datetime -from distutils import spawn import fnmatch import io import os @@ -32,7 +31,6 @@ 'nova-compute', 'neutron-dhcp-agent', 'neutron-l3-agent', - 'neutron-linuxbridge-agent', 'neutron-metadata-agent', 'neutron-openvswitch-agent', 'cinder-volume', @@ -52,7 +50,7 @@ def get_options(): def filename(dirname, name=""): - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc) fmt = "worlddump-%Y-%m-%d-%H%M%S" if name: fmt += "-" + name @@ -76,7 +74,7 @@ def _dump_cmd(cmd): def _find_cmd(cmd): - if not spawn.find_executable(cmd): + if not shutil.which(cmd): print("*** %s not found: skipping" % cmd) return False return True diff --git a/tox.ini b/tox.ini index ec764abc87..26cd68c031 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ basepython = python3 # against devstack, just set BASHATE_INSTALL_PATH=/path/... 
to your # modified bashate tree deps = - {env:BASHATE_INSTALL_PATH:bashate==2.0.0} + {env:BASHATE_INSTALL_PATH:bashate} allowlist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ diff --git a/unstack.sh b/unstack.sh index 813f9a8117..1919ef8ad7 100755 --- a/unstack.sh +++ b/unstack.sh @@ -61,7 +61,6 @@ source $TOP_DIR/lib/tls # Source project function libraries source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -73,6 +72,8 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop +source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 # Extras Source @@ -87,6 +88,12 @@ fi load_plugin_settings +# Enable neutron server services so they can be properly stopped +# This replicates the service enabling logic from stack.sh +if is_service_enabled neutron; then + enable_neutron_server_services +fi + set -o xtrace # Run extras @@ -156,6 +163,11 @@ if [[ -n "$UNSTACK_ALL" ]]; then if is_service_enabled rabbit; then stop_service rabbitmq-server fi + + # Stop LDAP server + if is_service_enabled ldap; then + stop_ldap + fi fi if is_service_enabled neutron; then @@ -168,8 +180,18 @@ if is_service_enabled etcd3; then cleanup_etcd3 fi -if is_service_enabled dstat; then - stop_dstat +if is_service_enabled openstack-cli-server; then + stop_service devstack@openstack-cli-server +fi + +if is_service_enabled tcpdump; then + stop_tcpdump +fi + +stop_dstat + +if is_service_enabled atop; then + stop_atop fi # NOTE: Cinder automatically installs the lvm2 package, independently of the @@ -185,4 +207,4 @@ rm -Rf $DEST/async # Clean any safe.directory items we wrote into the global # gitconfig. We can identify the relevant ones by checking that they # point to somewhere in our $DEST directory. 
-sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig +sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig