diff --git a/.gitignore b/.gitignore
index f63b371..eb76620 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,10 @@
geekodoc/rng/docbookxi.rnc
geekodoc/rng/docbookxi.rng
+geekodoc/rng/*.srng
+geekodoc/rng/catalog.dtd
+geekodoc/rng/geekodoc5*.rng
+geekodoc/tests/*.err
build/
autom4te.cache/
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..3abf1d7
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,29 @@
+# config file for automatic testing at travis-ci.org
+language: python
+python:
+ - 3.5
+sudo:
+ required
+env:
+ global:
+ - LD_PRELOAD=/lib/x86_64-linux-gnu/libSegFault.so
+
+
+# command to install dependencies
+before_install:
+ # - sudo apt-get -qq update
+ - sudo apt-get install -y libxml2-dev libxml2-utils xsltproc jing trang make
+ # docbook5-xml
+ - cat /etc/os-release
+
+install:
+ - pip install rnginline
+ # Workaround to download DocBook 5.1 schema directly
+ - sudo mkdir -p /usr/share/xml/docbook/schema/rng/5.1/
+ - sudo wget -P /usr/share/xml/docbook/schema/rng/5.1/ http://docs.oasis-open.org/docbook/docbook/v5.1/cos01/schemas/rng/docbookxi.rnc http://docs.oasis-open.org/docbook/docbook/v5.1/cos01/schemas/rng/docbookxi.rng
+ - make VERBOSE=1 -C geekodoc/rng
+
+# commands to run tests
+script:
+ - ./geekodoc/tests/run-tests.sh
+ - ./novdoc/tests/run-tests.sh
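
Taken together, the CI job installs the XML toolchain, fetches the DocBook 5.1 schemas into the path the Makefile expects, builds the flattened GeekoDoc schema, and runs both test suites. A rough local equivalent (package names as in the Travis configuration above; adjust for your distribution):

# install the toolchain the CI job uses
sudo apt-get install -y libxml2-dev libxml2-utils xsltproc jing trang make
pip install rnginline

# the Makefile looks for the DocBook 5.1 schemas at this fixed path
sudo mkdir -p /usr/share/xml/docbook/schema/rng/5.1/
sudo wget -P /usr/share/xml/docbook/schema/rng/5.1/ \
    http://docs.oasis-open.org/docbook/docbook/v5.1/cos01/schemas/rng/docbookxi.rnc \
    http://docs.oasis-open.org/docbook/docbook/v5.1/cos01/schemas/rng/docbookxi.rng

# build the flattened schema and run the GeekoDoc and NovDoc test suites
make VERBOSE=1 -C geekodoc/rng
./geekodoc/tests/run-tests.sh
./novdoc/tests/run-tests.sh
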
diff --git a/ChangeLog b/ChangeLog
index 32063cb..6c55e0e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+------------------------------------------------------------------
+Fri Sep 15 09:16:00 UTC 2017 - toms@opensuse.org
+
+Release 0.9.8
+
+* GeekoDoc
+ * Restrict Possible Values in format Attribute (#24)
+ * Fix outdated Schematron rules (#22)
+ * Implement task elements (#20)
+ * Adapt content model of task* elements (#1)
+ * Add test cases and use Travis to check schema
+
------------------------------------------------------------------
Thu Dec 01 09:52:00 UTC 2016 - toms@opensuse.org
diff --git a/README.md b/README.md
index 3f96f43..f7e73f4 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
# SUSE Schemas
+[![Build Status](https://travis-ci.org/openSUSE/geekodoc.svg?branch=develop)](https://travis-ci.org/openSUSE/geekodoc)
## NovDoc
diff --git a/geekodoc/rng/Makefile b/geekodoc/rng/Makefile
index 3214c0f..ab98d3c 100644
--- a/geekodoc/rng/Makefile
+++ b/geekodoc/rng/Makefile
@@ -7,46 +7,64 @@
# Requirements:
# * trang
# * docbook_5
-# * python3-rnginline (from obs://home:thomas-schraitle/python3-rnginline)
+# * python3-rnginline (from obs://devel:languages:python3/python3-rnginline)
.SUFFIXES: .rng rnc
SUSESCHEMA := geekodoc5
+SUSESCHEMA_RNC := $(SUSESCHEMA)-flat.rnc
+SUSESCHEMA_RNG := $(patsubst %.rnc, %.rng, $(SUSESCHEMA_RNC))
DOCBOOKXI_RNC_PATH := /usr/share/xml/docbook/schema/rng/5.1/docbookxi.rnc
+DOCBOOKXI_RNG_PATH := $(patsubst %.rnc, %.rng, $(DOCBOOKXI_RNC_PATH))
DOCBOOKXI_RNC := $(notdir $(DOCBOOKXI_RNC_PATH))
-DOCBOOKXI_RNG := $(patsubst .rnc,.rng,$(DOCBOOKXI_RNC))
+DOCBOOKXI_RNG := $(patsubst %.rnc, %.rng, $(DOCBOOKXI_RNC))
.PHONY: all clean
-all: $(SUSESCHEMA)-flat.rnc
+all: $(SUSESCHEMA_RNC) $(SUSESCHEMA_RNG)
clean:
- rm $(SUSESCHEMA)-flat.rnc $(DOCBOOKXI_RNC) $(DOCBOOKXI_RNG) 2>/dev/null || true
+ rm $(DOCBOOKXI_RNC) $(DOCBOOKXI_RNG) $(SUSESCHEMA)*.rng \
+ transclusion.rng \
+ 2>/dev/null || true
-.INTERMEDIATE: $(SUSESCHEMA).rng $(SUSESCHEMA)-flat.rni $(SUSESCHEMA)-flat.rng
-
+#
+# HINT:
+# We can't just link it from the system; we need to apply
+# a stylesheet to fix some Schematron pattern rules first
+# (see openSUSE/geekodoc#22).
+# From here we can create the RNC. Here is a visual
+# representation:
+#
+# DB RNG --[XSLT]--> DB RNG2 --[trang]--> DB RNC
+#
+$(DOCBOOKXI_RNG): $(DOCBOOKXI_RNG_PATH)
+ @echo "* Fixing DocBook RNG schema..."
+ xsltproc --output $@ ../xsl/sch-fix.xsl $<
-$(DOCBOOKXI_RNC): $(DOCBOOKXI_RNC_PATH)
- @echo "* Linking $< -> $@"
- ln -sf $<
+$(DOCBOOKXI_RNC): $(DOCBOOKXI_RNG)
+ @echo "* Converting DocBook $< -> $@"
+ trang $< $@
+# .INTERMEDIATE: $(SUSESCHEMA).rng
$(SUSESCHEMA).rng: $(SUSESCHEMA).rnc $(DOCBOOKXI_RNC)
@echo "* Converting $< -> $@"
trang $< $@
+.INTERMEDIATE: $(SUSESCHEMA)-flat.rni
$(SUSESCHEMA)-flat.rni: $(SUSESCHEMA).rng
@echo "* Flattening $< -> $@"
rnginline $< $@
-$(SUSESCHEMA)-flat.rng: $(SUSESCHEMA)-flat.rni
- echo '* Cleaning up schema contents $< -> $@'
+# .INTERMEDIATE: $(SUSESCHEMA)-flat.rng
+$(SUSESCHEMA_RNG): $(SUSESCHEMA)-flat.rni
+ @echo '* Cleaning up schema contents $< -> $@'
xmllint -o $@ --nsclean --format $<
-$(SUSESCHEMA)-flat.rnc: $(SUSESCHEMA)-flat.rng
+$(SUSESCHEMA_RNC): $(SUSESCHEMA_RNG)
@echo "* Converting $< -> $@"
trang $< $@
@sed -i -r 's_\s+$$__' $@
-
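
Expanded into plain commands (run inside geekodoc/rng/), the new default target now performs roughly the following steps. The extra xsltproc pass over the system DocBook schema appears to be what replaces the outdated name attribute on the inherited Schematron patterns with proper title elements (openSUSE/geekodoc#22); the effect is visible throughout the regenerated flat schema further down.

# fix the Schematron patterns in the installed DocBook RNG, then derive an RNC from it
xsltproc --output docbookxi.rng ../xsl/sch-fix.xsl \
    /usr/share/xml/docbook/schema/rng/5.1/docbookxi.rng
trang docbookxi.rng docbookxi.rnc

# GeekoDoc source -> RNG, inline all includes, clean up, convert back to RNC
trang geekodoc5.rnc geekodoc5.rng
rnginline geekodoc5.rng geekodoc5-flat.rni
xmllint -o geekodoc5-flat.rng --nsclean --format geekodoc5-flat.rni
trang geekodoc5-flat.rng geekodoc5-flat.rnc
sed -i -r 's_\s+$__' geekodoc5-flat.rnc   # strip trailing whitespace
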
diff --git a/geekodoc/rng/geekodoc5-flat.rnc b/geekodoc/rng/geekodoc5-flat.rnc
index cae372b..f747662 100644
--- a/geekodoc/rng/geekodoc5-flat.rnc
+++ b/geekodoc/rng/geekodoc5-flat.rnc
@@ -13,7 +13,12 @@ namespace trans = "http://docbook.org/ns/transclusion"
namespace xi = "http://www.w3.org/2001/XInclude"
namespace xlink = "http://www.w3.org/1999/xlink"
-suse.schema.version = "5.1-subset GeekoDoc-0.9.6"
+# Define some namespaces for Schematron
+
+s:ns [ uri = "http://docbook.org/ns/docbook" prefix = "db" ]
+s:ns [ uri = "http://www.w3.org/1999/xlink" prefix = "xlink" ]
+# Constants
+suse.schema.version = "5.1-subset GeekoDoc-0.9.8"
#
div {
div {
@@ -147,13 +152,54 @@ div {
& db.xlink.show.attribute?
& db.xlink.actuate.attribute?
}
+div {
+ db.format.enumeration =
+
+ ## Allowed formats for SUSE documentation
+ (
+ ## Format of the dia tool
+ "DIA"
+ |
+ ## Encapsulated PostScript
+ "EPS"
+ |
+ ## Format of the xfig tool
+ "FIG"
+ |
+ ## LibreOffice illustration format
+ "ODG"
+ |
+ ## Portable Document Format
+ "PDF"
+ |
+ ## Portable Network Graphics
+ "PNG"
+ |
+ ## Scalable Vector Graphics
+ "SVG")
+ # The original list from the DocBook 4.5 DTD was:
+ # "BMP| CGM-CHAR | CGM-BINARY | CGM-CLEAR | DITROFF | DVI
+ # | EPS | EQN | FAX | GIF | GIF87a | GIF89a
+ # | JPG | JPEG | IGES | PCX
+ # | PIC | PNG | PS | SGML | TBL | TEX | TIFF | WMF | WPG
+ # | SVG | PDF | SWF
+ # | linespecific
+
+}
# Use a catalog entry to resolve this:
# include "http://docbook.org/xml/5.1/rng/docbook.rnc"
div {
- div {
- # This file is part of DocBook XInclude V5.1CR3
+ [
+ # DocBook Version 5.1
+ # Candidate OASIS Standard 01
+ # 23 June 2016
+ # Copyright (c) OASIS Open 2016. All Rights Reserved.
+ # Source: http://docs.oasis-open.org/docbook/docbook/v5.1/cos01/schemas/
+ # Link to latest version of specification: http://docs.oasis-open.org/docbook/docbook/v5.1/docbook-v5.1.html
+ #
+ # This file is part of DocBook XInclude V5.1-COS01
#
- # Copyright 1992-2011 HaL Computer Systems, Inc.,
+ # Copyright 1992-2016 HaL Computer Systems, Inc.,
# O'Reilly & Associates, Inc., ArborText, Inc., Fujitsu Software
# Corporation, Norman Walsh, Sun Microsystems, Inc., and the
# Organization for the Advancement of Structured Information
@@ -173,10 +219,29 @@ div {
# for more information.
#
# Please direct all questions, bug reports, or suggestions for changes
- # to the docbook@lists.oasis-open.org mailing list. For more
+ # to the docbook-comment@lists.oasis-open.org mailing list. For more
# information, see http://www.oasis-open.org/docbook/.
#
# ======================================================================
+
+ s:ns [ prefix = "rng" uri = "http://relaxng.org/ns/structure/1.0" ]
+ s:ns [ prefix = "s" uri = "http://purl.oclc.org/dsdl/schematron" ]
+ s:ns [ prefix = "db" uri = "http://docbook.org/ns/docbook" ]
+ s:ns [
+ prefix = "a"
+ uri = "http://relaxng.org/ns/compatibility/annotations/1.0"
+ ]
+ s:ns [ prefix = "xlink" uri = "http://www.w3.org/1999/xlink" ]
+ s:ns [ prefix = "html" uri = "http://www.w3.org/1999/xhtml" ]
+ s:ns [
+ prefix = "ctrl"
+ uri = "http://nwalsh.com/xmlns/schema-control/"
+ ]
+ s:ns [ prefix = "xi" uri = "http://www.w3.org/2001/XInclude" ]
+ s:ns [ prefix = "mml" uri = "http://www.w3.org/1998/Math/MathML" ]
+ s:ns [ prefix = "svg" uri = "http://www.w3.org/2000/svg" ]
+ ]
+ div {
start =
(db.set
| db.book
@@ -483,16 +548,6 @@ div {
(db.linkend.attribute | db.xlink.attributes)?
db.common.req.linking.attributes =
db.linkend.attribute | db.xlink.attributes
- db.common.data.attributes =
-
- ## Specifies the format of the data
- attribute format { text }?,
- (
- ## Indentifies the location of the data by URI
- attribute fileref { xsd:anyURI }
- |
- ## Identifies the location of the data by external identifier (entity name)
- attribute entityref { xsd:ENTITY })
db.verbatim.continuation.enumeration =
## Line numbering continues from the immediately preceding element with the same name.
@@ -1039,7 +1094,9 @@ div {
## A list of operations to be performed in a well-defined sequence
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1090,7 +1147,9 @@ div {
## Alternative steps in a procedure
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1146,7 +1205,9 @@ div {
## A summary
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1180,7 +1241,9 @@ div {
## A short description or note about a person
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1287,7 +1350,9 @@ div {
## A footnote
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1305,7 +1370,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1323,7 +1390,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1341,7 +1410,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1359,7 +1430,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1377,7 +1450,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1395,7 +1470,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1413,7 +1490,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1431,7 +1510,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1449,7 +1530,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1467,7 +1550,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1485,7 +1570,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1503,7 +1590,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1535,7 +1624,9 @@ div {
## A paragraph with a title
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1700,7 +1791,9 @@ div {
## An undecorated list of single words or short phrases
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1775,7 +1868,9 @@ div {
## A formal example, with a title
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1793,7 +1888,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1811,7 +1908,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1829,7 +1928,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1847,7 +1948,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1883,7 +1986,9 @@ div {
## A displayed example without a title
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1933,7 +2038,9 @@ div {
## Text that a user sees or might see on a computer screen
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1982,7 +2089,9 @@ div {
## A displayed media object (video, audio, image, etc.)
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -2020,7 +2129,9 @@ div {
## An inline media object (video, audio, image, and so on)
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -2078,7 +2189,9 @@ div {
## A wrapper for image data and its associated meta-information
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -2114,7 +2227,9 @@ div {
## A wrapper for a text description of an object and its associated meta-information
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -2258,7 +2373,9 @@ div {
## Pointer to external image data
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -2858,7 +2975,9 @@ div {
## A statement of legal obligations or requirements
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3077,7 +3196,9 @@ div {
## A history of the revisions to a document
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3638,9 +3759,7 @@ div {
db.navigation.components*
db.setindex.components = notAllowed | db.setindex
db.toc.components = notAllowed | db.toc
- db.set.components =
- ((db.set | db.book)+ | db.article+)
- | db.xi.include
+ db.set.components = (db.set | db.book | db.article) | db.xi.include
div {
db.set.status.attribute = db.status.attribute
db.set.role.attribute = attribute role { text }
@@ -3650,7 +3769,9 @@ div {
## A collection of books
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3672,7 +3793,7 @@ div {
db.set.attlist,
db.set.info,
db.toc.components?,
- db.set.components,
+ db.set.components+,
db.setindex.components?
}
}
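
Together with the relaxed db.set.components above, the trailing + moved into the set content model means a set now accepts one or more book, article, nested set, or xi:include children in any mix, instead of either books or articles exclusively. A minimal sketch of the newly permitted shape (file name, titles, and the jing check are illustrative; jing validates the RELAX NG part only, not the embedded Schematron):

# sketch of a <set> mixing a book and an article, as the relaxed model allows
cat > set-mixed.xml <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<set xmlns="http://docbook.org/ns/docbook" version="5.1">
  <title>Mixed Set</title>
  <book>
    <title>A Book</title>
    <chapter>
      <title>A Chapter</title>
      <para>Book content.</para>
    </chapter>
  </book>
  <article>
    <title>An Article</title>
    <para>Article content.</para>
  </article>
</set>
EOF
jing geekodoc5-flat.rng set-mixed.xml
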
@@ -3688,7 +3809,9 @@ div {
## A book
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3735,7 +3858,9 @@ div {
## An appendix in a book or article
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3769,7 +3894,9 @@ div {
## A chapter, as of a book
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3805,7 +3932,9 @@ div {
## A division in a book
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3839,7 +3968,9 @@ div {
## Introductory matter preceding the first chapter of a book
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3878,7 +4009,9 @@ div {
## An introduction to the contents of a part
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -3958,7 +4091,9 @@ div {
## An article
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4005,7 +4140,9 @@ div {
## An annotation
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -4023,7 +4160,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4050,7 +4189,9 @@ div {
## Identifies the XLink extended link type
[
s:pattern [
- name = "XLink extended placement"
+ "\x{a}" ~
+ " "
+ s:title [ "XLink extended placement" ]
"\x{a}" ~
" "
s:rule [
@@ -4078,7 +4219,9 @@ div {
## Identifies the XLink locator link type
[
s:pattern [
- name = "XLink locator placement"
+ "\x{a}" ~
+ " "
+ s:title [ "XLink locator placement" ]
"\x{a}" ~
" "
s:rule [
@@ -4106,7 +4249,9 @@ div {
## Identifies the XLink arc link type
[
s:pattern [
- name = "XLink arc placement"
+ "\x{a}" ~
+ " "
+ s:title [ "XLink arc placement" ]
"\x{a}" ~
" "
s:rule [
@@ -4134,7 +4279,9 @@ div {
## Identifies the XLink resource link type
[
s:pattern [
- name = "XLink resource placement"
+ "\x{a}" ~
+ " "
+ s:title [ "XLink resource placement" ]
"\x{a}" ~
" "
s:rule [
@@ -4162,7 +4309,9 @@ div {
## Identifies the XLink title link type
[
s:pattern [
- name = "XLink title placement"
+ "\x{a}" ~
+ " "
+ s:title [ "XLink title placement" ]
"\x{a}" ~
" "
s:rule [
@@ -4288,7 +4437,9 @@ div {
## A top-level section of document
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4326,7 +4477,9 @@ div {
## A subsection within a sect1
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4364,7 +4517,9 @@ div {
## A subsection within a sect2
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4402,7 +4557,9 @@ div {
## A subsection within a sect3
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4439,7 +4596,9 @@ div {
## A subsection within a sect4
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4485,7 +4644,9 @@ div {
## A collection of reference entries
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4519,7 +4680,9 @@ div {
## A reference page (originally a UNIX man-style reference page)
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4670,7 +4833,9 @@ div {
## A syntactic synopsis of the subject of the reference page
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4704,7 +4869,9 @@ div {
## A recursive section in a refentry
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4738,7 +4905,9 @@ div {
## A major subsection of a reference entry
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4773,7 +4942,9 @@ div {
## A subsection of a refsect1
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4813,7 +4984,9 @@ div {
## A subsection of a refsect2
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4849,7 +5022,9 @@ div {
## A wrapper for a list of glossary entries
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -4927,7 +5102,9 @@ div {
## to another
[
s:pattern [
- name = "Glosssary 'see' type constraint"
+ "\x{a}" ~
+ " "
+ s:title [ "Glosssary 'see' type constraint" ]
"\x{a}" ~
" "
s:rule [
@@ -4964,7 +5141,9 @@ div {
## A cross-reference from one glossentry to another
[
s:pattern [
- name = "Glossary 'seealso' type constraint"
+ "\x{a}" ~
+ " "
+ s:title [ "Glossary 'seealso' type constraint" ]
"\x{a}" ~
" "
s:rule [
@@ -5007,7 +5186,9 @@ div {
## The first occurrence of a term, with limited content
[
s:pattern [
- name = "Glossary 'firstterm' type constraint"
+ "\x{a}" ~
+ " "
+ s:title [ "Glossary 'firstterm' type constraint" ]
"\x{a}" ~
" "
s:rule [
@@ -5040,7 +5221,9 @@ div {
## A glossary term
[
s:pattern [
- name = "Glossary 'glossterm' type constraint"
+ "\x{a}" ~
+ " "
+ s:title [ "Glossary 'glossterm' type constraint" ]
"\x{a}" ~
" "
s:rule [
@@ -5073,7 +5256,9 @@ div {
## A glossary term
[
s:pattern [
- name = "Glossary 'glossterm' type constraint"
+ "\x{a}" ~
+ " "
+ s:title [ "Glossary 'glossterm' type constraint" ]
"\x{a}" ~
" "
s:rule [
@@ -5103,7 +5288,9 @@ div {
## A glossary
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -5138,7 +5325,9 @@ div {
## A division in a glossary
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -5378,6 +5567,36 @@ div {
db.indexterm.endofrange =
## Identifies the end of a range associated with an indexed term
+ [
+ s:pattern [
+ "\x{a}" ~
+ " "
+ s:title [ "Indexterm 'startref' type constraint" ]
+ "\x{a}" ~
+ " "
+ s:rule [
+ context = "db:indexterm[@startref]"
+ "\x{a}" ~
+ " "
+ s:assert [
+ test =
+ "local-name(//*[@xml:id=current()/@startref]) = 'indexterm' and namespace-uri(//*[@xml:id=current()/@startref]) = 'http://docbook.org/ns/docbook'"
+ "@startref on indexterm must point to an indexterm."
+ ]
+ "\x{a}" ~
+ " "
+ s:assert [
+ test =
+ "//*[@xml:id=current()/@startref]/@class='startofrange'"
+ "@startref on indexterm must point to a startofrange indexterm."
+ ]
+ "\x{a}" ~
+ " "
+ ]
+ "\x{a}" ~
+ " "
+ ]
+ ]
element indexterm { db.indexterm.endofrange.attlist, empty }
}
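
The new pattern adds two asserts for endofrange index terms: @startref must point to an indexterm, and that indexterm must carry class="startofrange". A conforming pair looks roughly like this (sketch; the document framing and the jing call are illustrative, and the Schematron asserts themselves need a Schematron-aware validator rather than jing):

# sketch of a startofrange/endofrange indexterm pair satisfying the new asserts
cat > indexterm-range.xml <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<article xmlns="http://docbook.org/ns/docbook" version="5.1">
  <title>Index Ranges</title>
  <para>Start of the indexed range.
    <indexterm xml:id="idx.storage" class="startofrange">
      <primary>storage</primary>
    </indexterm>
  </para>
  <para>End of the indexed range.
    <indexterm class="endofrange" startref="idx.storage"/>
  </para>
</article>
EOF
jing geekodoc5-flat.rng indexterm-range.xml
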
div {
@@ -5424,7 +5643,17 @@ div {
}
div {
db.see.role.attribute = attribute role { text }
- db.see.class.attribute = attribute class { "normal" | "under" }
+ db.see.class.attribute =
+
+ ## Identifies the class of 'see'
+ attribute class {
+
+ ## Normal
+ "normal"
+ |
+ ## See 'under'
+ "under"
+ }
db.see.attlist =
db.see.role.attribute?
& db.see.class.attribute?
@@ -5438,7 +5667,16 @@ div {
div {
db.seealso.role.attribute = attribute role { text }
db.seealso.class.attribute =
- attribute class { "normal" | "under" }
+
+ ## Identifies the class of 'seealso'
+ attribute class {
+
+ ## Normal
+ "normal"
+ |
+ ## See 'under'
+ "under"
+ }
db.seealso.attlist =
db.seealso.role.attribute?
& db.seealso.class.attribute?
@@ -5464,7 +5702,9 @@ div {
## An index to a book or part of a book
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -5516,7 +5756,9 @@ div {
## A division in an index
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -5623,7 +5865,6 @@ div {
& db.tocentry.pagenum.attribute?
& db.linkend.attribute?
}
- db.task.info = db._info.title.req
div {
db.task.role.attribute = attribute role { text }
db.task =
@@ -5631,7 +5872,9 @@ div {
## A task to be completed
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -5666,32 +5909,6 @@ div {
& db.common.attributes
& db.common.linking.attributes
db.tasksummary.info = db._info.title.only
- db.tasksummary =
-
- ## A summary of a task
- [
- s:pattern [
- name = "Root must have version"
- "\x{a}" ~
- " "
- s:rule [
- context = "/db:tasksummary"
- "\x{a}" ~
- " "
- s:assert [
- test = "@version"
- "If this element is the root element, it must have a version attribute."
- ]
- "\x{a}" ~
- " "
- ]
- "\x{a}" ~
- " "
- ]
- ]
- element tasksummary {
- db.tasksummary.attlist, db.tasksummary.info, db.all.blocks+
- }
}
div {
db.taskprerequisites.role.attribute = attribute role { text }
@@ -5700,34 +5917,6 @@ div {
& db.common.attributes
& db.common.linking.attributes
db.taskprerequisites.info = db._info.title.only
- db.taskprerequisites =
-
- ## The prerequisites for a task
- [
- s:pattern [
- name = "Root must have version"
- "\x{a}" ~
- " "
- s:rule [
- context = "/db:taskprerequisites"
- "\x{a}" ~
- " "
- s:assert [
- test = "@version"
- "If this element is the root element, it must have a version attribute."
- ]
- "\x{a}" ~
- " "
- ]
- "\x{a}" ~
- " "
- ]
- ]
- element taskprerequisites {
- db.taskprerequisites.attlist,
- db.taskprerequisites.info,
- db.all.blocks+
- }
}
div {
db.taskrelated.role.attribute = attribute role { text }
@@ -5736,32 +5925,6 @@ div {
& db.common.attributes
& db.common.linking.attributes
db.taskrelated.info = db._info.title.only
- db.taskrelated =
-
- ## Information related to a task
- [
- s:pattern [
- name = "Root must have version"
- "\x{a}" ~
- " "
- s:rule [
- context = "/db:taskrelated"
- "\x{a}" ~
- " "
- s:assert [
- test = "@version"
- "If this element is the root element, it must have a version attribute."
- ]
- "\x{a}" ~
- " "
- ]
- "\x{a}" ~
- " "
- ]
- ]
- element taskrelated {
- db.taskrelated.attlist, db.taskrelated.info, db.all.blocks+
- }
}
db.area.units.enumeration =
@@ -6326,7 +6489,9 @@ div {
## A cell in a table
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6344,7 +6509,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6439,7 +6606,9 @@ div {
## A formal table in a document
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6457,7 +6626,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6475,7 +6646,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6493,7 +6666,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -6527,7 +6702,9 @@ div {
## A table without a title
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -6816,7 +6993,9 @@ div {
## An HTML table caption
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6834,7 +7013,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6852,7 +7033,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6870,7 +7053,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6888,7 +7073,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6906,7 +7093,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6924,7 +7113,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6942,7 +7133,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6960,7 +7153,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6978,7 +7173,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -6996,7 +7193,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -7014,7 +7213,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -7260,7 +7461,9 @@ div {
## Explanatory material relating to a message in a message set
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -7304,7 +7507,9 @@ div {
## A question-and-answer set
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -7341,7 +7546,9 @@ div {
## A titled division in a qandaset
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -7462,7 +7669,9 @@ div {
## A MathML expression in a media object
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -7519,7 +7728,9 @@ div {
## An SVG drawing in a media object
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -8604,7 +8815,9 @@ div {
## A literal listing of all or part of a program
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -8634,7 +8847,9 @@ div {
## A note of caution
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8652,7 +8867,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8670,7 +8887,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8688,7 +8907,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8706,7 +8927,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8724,7 +8947,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -8753,7 +8978,9 @@ div {
## An admonition set off from the text
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8771,7 +8998,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8789,7 +9018,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8807,7 +9038,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8825,7 +9058,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8843,7 +9078,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -8872,7 +9109,9 @@ div {
## A message set off from the text
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8890,7 +9129,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8908,7 +9149,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8926,7 +9169,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8944,7 +9189,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -8962,7 +9209,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -8989,7 +9238,9 @@ div {
## A suggestion to the user, set off from the text
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9007,7 +9258,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9025,7 +9278,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9043,7 +9298,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9061,7 +9318,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9079,7 +9338,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -9106,7 +9367,9 @@ div {
## An admonition set off from the text
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9124,7 +9387,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9142,7 +9407,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9160,7 +9427,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9178,7 +9447,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -9196,7 +9467,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -9392,7 +9665,9 @@ div {
## A modular unit of documentation not part of any particular narrative flow
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ rng:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -10160,7 +10435,17 @@ div {
& db.variablelist.termlength.attribute?
db.warning.attlist = db.warning.role.attribute? & db.common.attributes
# ========= Changed attributes
-
+ #
+ db.common.data.attributes =
+
+ ## Specifies the format of the data
+ attribute format { db.format.enumeration }?,
+ (
+ ## Identifies the location of the data by URI
+ attribute fileref { xsd:anyURI }
+ |
+ ## Identifies the location of the data by external identifier (entity name)
+ attribute entityref { xsd:ENTITY })
# Require xlink:href
db.link.attlist =
db.link.role.attribute?
@@ -10212,7 +10497,9 @@ div {
## A untitled figure
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -10244,7 +10531,9 @@ div {
## A formal figure, generally an illustration, with a title
[
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -10262,7 +10551,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -10280,7 +10571,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -10298,7 +10591,9 @@ div {
" "
]
s:pattern [
- name = "Element exclusion"
+ "\x{a}" ~
+ " "
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -10316,7 +10611,9 @@ div {
" "
]
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -10352,7 +10649,9 @@ div {
## A paragraph (without block elements)
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -10438,7 +10737,9 @@ div {
## A unit of action in a procedure
[
s:pattern [
- name = "Root must have version"
+ "\x{a}" ~
+ " "
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -10467,6 +10768,49 @@ div {
db.result?))
}
}
+ # task* elements
+ div {
+ # Require title or info/title in task
+ db.task.info = db._info.title.onlyreq
+ # tasksummary: Only certain elements are allowed
+ db.tasksummary =
+
+ ## A summary of a task
+ element tasksummary {
+ db.tasksummary.attlist,
+ # db.tasksummary.info,
+ (db.para
+ | db.formalpara
+ | db.itemizedlist
+ | db.orderedlist
+ | db.simplelist
+ | db.variablelist
+ | db.admonition.blocks)+
+ }
+ # taskprerequisites: We allow only paras
+ db.taskprerequisites =
+
+ ## The prerequisites for a task
+ element taskprerequisites {
+ db.taskprerequisites.attlist,
+ # db.taskprerequisites.info,
+ (db.para | db.formalpara)+
+ }
+ # taskrelated: allow only paras and lists
+ db.taskrelated =
+
+ ## Information related to a task
+ element taskrelated {
+ db.taskrelated.attlist,
+ # db.taskrelated.info,
+ (db.para
+ | db.formalpara
+ | db.itemizedlist
+ | db.orderedlist
+ | db.simplelist
+ | db.variablelist)+
+ }
+ }
# lists: itemizedlist, orderedlist, variablelist
div {
# Removed several block elements as child of list elements
diff --git a/geekodoc/rng/geekodoc5.rnc b/geekodoc/rng/geekodoc5.rnc
index dc2f585..1d8d145 100644
--- a/geekodoc/rng/geekodoc5.rnc
+++ b/geekodoc/rng/geekodoc5.rnc
@@ -8,7 +8,9 @@
# Author: Thomas Schraitle, 2015-2016
#
default namespace db = "http://docbook.org/ns/docbook"
+datatypes xsd = "http://www.w3.org/2001/XMLSchema-datatypes"
+# Namespaces
namespace a = "http://relaxng.org/ns/compatibility/annotations/1.0"
namespace ctrl = "http://nwalsh.com/xmlns/schema-control/"
namespace html = "http://www.w3.org/1999/xhtml"
@@ -18,14 +20,19 @@ namespace s = "http://purl.oclc.org/dsdl/schematron"
namespace xlink = "http://www.w3.org/1999/xlink"
namespace local = ""
+
# Namespace for DocBook Transclusions
# See http://docbook.org/docs/transclusion/
namespace locattr="http://www.w3.org/2001/XInclude/local-attributes"
namespace trans="http://docbook.org/ns/transclusion"
-datatypes xsd = "http://www.w3.org/2001/XMLSchema-datatypes"
+# Define some namespaces for Schematron
+s:ns [ uri = "http://docbook.org/ns/docbook" prefix = "db" ]
+s:ns [ uri = "http://www.w3.org/1999/xlink" prefix = "xlink" ]
+
+# Constants
+suse.schema.version = "5.1-subset GeekoDoc-0.9.8"
-suse.schema.version = "5.1-subset GeekoDoc-0.9.6"
#
include "transclusion.rnc"
@@ -111,6 +118,33 @@ div {
& db.xlink.actuate.attribute?
}
+div {
+
+ db.format.enumeration =
+ ## Allowed formats for SUSE documentation
+ (
+ ## Format of the dia tool
+ "DIA" |
+ ## Encapsulated PostScript
+ "EPS" |
+ ## Format of the xfig tool
+ "FIG" |
+ ## LibreOffice illustration format
+ "ODG" |
+ ## Portable Document Format
+ "PDF" |
+ ## Portable Network Graphics
+ "PNG" |
+ ## Scalable Vector Graphics
+ "SVG" )
+ # The original list from the DocBook 4.5 DTD was:
+ # "BMP| CGM-CHAR | CGM-BINARY | CGM-CLEAR | DITROFF | DVI
+ # | EPS | EQN | FAX | GIF | GIF87a | GIF89a
+ # | JPG | JPEG | IGES | PCX
+ # | PIC | PNG | PS | SGML | TBL | TEX | TIFF | WMF | WPG
+ # | SVG | PDF | SWF
+ # | linespecific
+}
# Use a catalog entry to resolve this:
# include "http://docbook.org/xml/5.1/rng/docbook.rnc"
include "docbookxi.rnc"
@@ -958,6 +992,19 @@ include "docbookxi.rnc"
& db.common.attributes
#========= Changed attributes
+ #
+ db.common.data.attributes =
+ ## Specifies the format of the data
+ attribute format { db.format.enumeration }?,
+ (
+ ## Identifies the location of the data by URI
+ attribute fileref { xsd:anyURI }
+ |
+ ## Identifies the location of the data by external identifier (entity name)
+ attribute entityref { xsd:ENTITY }
+ )
+
+
# Require xlink:href
db.link.attlist =
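
With db.common.data.attributes redefined here, the format attribute on imagedata (and the other data elements) is limited to the seven uppercase values of db.format.enumeration instead of free text. A small sketch (file names and the jing check are illustrative):

cat > figure-format.xml <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<article xmlns="http://docbook.org/ns/docbook" version="5.1">
  <title>Image Formats</title>
  <informalfigure>
    <mediaobject>
      <imageobject>
        <!-- format must be one of DIA, EPS, FIG, ODG, PDF, PNG, SVG -->
        <imagedata fileref="cluster.png" format="PNG"/>
      </imageobject>
    </mediaobject>
  </informalfigure>
</article>
EOF
jing geekodoc5-flat.rng figure-format.xml   # format="JPG" would now be rejected
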
@@ -1013,7 +1060,7 @@ include "docbookxi.rnc"
## A untitled figure
[
s:pattern [
- name = "Root must have version"
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1045,7 +1092,7 @@ include "docbookxi.rnc"
## A formal figure, generally an illustration, with a title
[
s:pattern [
- name = "Element exclusion"
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1063,7 +1110,7 @@ include "docbookxi.rnc"
" "
]
s:pattern [
- name = "Element exclusion"
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1081,7 +1128,7 @@ include "docbookxi.rnc"
" "
]
s:pattern [
- name = "Element exclusion"
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1099,7 +1146,7 @@ include "docbookxi.rnc"
" "
]
s:pattern [
- name = "Element exclusion"
+ s:title [ "Element exclusion" ]
"\x{a}" ~
" "
s:rule [
@@ -1117,7 +1164,7 @@ include "docbookxi.rnc"
" "
]
s:pattern [
- name = "Root must have version"
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1154,7 +1201,7 @@ include "docbookxi.rnc"
## A paragraph (without block elements)
[
s:pattern [
- name = "Root must have version"
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1239,7 +1286,7 @@ include "docbookxi.rnc"
## A unit of action in a procedure
[
s:pattern [
- name = "Root must have version"
+ s:title [ "Root must have version" ]
"\x{a}" ~
" "
s:rule [
@@ -1269,6 +1316,44 @@ include "docbookxi.rnc"
}
}
+ # task* elements
+ div {
+ # Require title or info/title in task
+ db.task.info = db._info.title.onlyreq
+
+ # tasksummary: Only certain elements are allowed
+ db.tasksummary =
+ ## A summary of a task
+ element tasksummary {
+ db.tasksummary.attlist,
+ # db.tasksummary.info,
+ (db.para | db.formalpara |
+ db.itemizedlist | db.orderedlist | db.simplelist | db.variablelist |
+ db.admonition.blocks
+ )+
+ }
+
+ # taskprerequisites: We allow only paras
+ db.taskprerequisites =
+ ## The prerequisites for a task
+ element taskprerequisites {
+ db.taskprerequisites.attlist,
+ # db.taskprerequisites.info,
+ (db.para | db.formalpara)+
+ }
+
+ # taskrelated: allow only paras and lists
+ db.taskrelated =
+ ## Information related to a task
+ element taskrelated {
+ db.taskrelated.attlist,
+ # db.taskrelated.info,
+ (db.para | db.formalpara |
+ db.itemizedlist | db.orderedlist | db.simplelist | db.variablelist
+ )+
+ }
+ }
+
# lists: itemizedlist, orderedlist, variablelist
div {
# Removed several block elements as child of list elements
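
In practice the reworked task model means: a task must have a title (directly or inside info), tasksummary is limited to paragraphs, lists and admonitions, taskprerequisites to paragraphs, and taskrelated to paragraphs and lists. A task that fits the new model might look like this (sketch; element names outside the task branch follow plain DocBook 5.1, and the jing call is illustrative):

# sketch of a <task> matching the restricted task* content models
cat > task-example.xml <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<article xmlns="http://docbook.org/ns/docbook" version="5.1">
  <title>Task Example</title>
  <task>
    <title>Add an OSD node</title>
    <tasksummary>
      <para>Extends the cluster by one storage node.</para>
    </tasksummary>
    <taskprerequisites>
      <para>A provisioned server with empty disks.</para>
    </taskprerequisites>
    <procedure>
      <step><para>Install the Ceph packages.</para></step>
      <step><para>Register the node with the cluster.</para></step>
    </procedure>
    <taskrelated>
      <para>Removing an OSD node works analogously.</para>
    </taskrelated>
  </task>
</article>
EOF
jing geekodoc5-flat.rng task-example.xml
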
diff --git a/geekodoc/tests/article-base.xml b/geekodoc/tests/article-base.xml
new file mode 100644
index 0000000..64de304
--- /dev/null
+++ b/geekodoc/tests/article-base.xml
@@ -0,0 +1,6 @@
+
+
+
+ Test Article
+ The quick brown fox jumps over the lazy dog.
+
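
The run-tests.sh script itself is not part of this diff, but for a fixture like this the check presumably boils down to validating it against the flattened schema, along these lines:

# either validator covers the RELAX NG part of the schema
jing geekodoc/rng/geekodoc5-flat.rng geekodoc/tests/article-base.xml
xmllint --noout --relaxng geekodoc/rng/geekodoc5-flat.rng geekodoc/tests/article-base.xml
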
diff --git a/geekodoc/tests/book.storage.admin.xml b/geekodoc/tests/book.storage.admin.xml
new file mode 100644
index 0000000..5b94980
--- /dev/null
+++ b/geekodoc/tests/book.storage.admin.xml
@@ -0,0 +1,16607 @@
+
+
+
+ Administration and Deployment GuideSUSE Enterprise StorageSES
+ 4
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ Copyright ©
+
+
+ SUSE LLC
+
+
+ Copyright © 2010-2014, Inktank Storage, Inc. and contributors.
+
+
+
+ The text of and illustrations in this document are licensed by Inktank
+ Storage under a Creative Commons Attribution-Share Alike 4.0
+ International ("CC-BY-SA"). An explanation of CC-BY-SA is available at
+ .
+ In accordance with CC-BY-SA, if you distribute this document or an
+ adaptation of it, you must provide the URL for the original version.
+
+
+ This document is an adaptation of original works found at
+ (2015-01-30).
+
+
+
+ Red Hat, Red Hat Enterprise Linux, the Shadowman logo, JBoss, MetaMatrix,
+ Fedora, the Infinity Logo, and RHCE are trademarks of Red Hat, Inc.,
+ registered in the United States and other countries. Linux® is the
+ registered trademark of Linus Torvalds in the United States and other
+ countries. Java® is a registered trademark of Oracle and/or its
+ affiliates. XFS® is a trademark of Silicon Graphics International
+ Corp. or its subsidiaries in the United States and/or other countries.
+ MySQL® is a registered trademark of MySQL AB in the United States, the
+ European Union and other countries. All other trademarks are the property
+ of their respective owners.
+
+
+ For SUSE or Novell trademarks, see the Novell Trademark and Service Mark
+ list
+ .
+ Linux* is a registered trademark of Linus Torvalds. All other third party
+ trademarks are the property of their respective owners. A trademark symbol
+ (®, ™ etc.) denotes a SUSE or Novell trademark; an asterisk (*)
+ denotes a third party trademark.
+
+
+ All information found in this book has been compiled with utmost attention
+ to detail. However, this does not guarantee complete accuracy. Neither
+ SUSE LLC, the authors, nor the translators shall be held liable for
+ possible errors or the consequences thereof.
+
+
+
+
+ About This Guide
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ SUSE Enterprise Storage is an extension to SUSE Linux Enterprise. It combines the capabilities from the
+ Ceph () storage project with the
+ enterprise engineering and support of SUSE. SUSE Enterprise Storage provides IT
+ organizations with the ability to deploy a distributed storage architecture
+ that can support a number of use cases using commodity hardware platforms.
+
+
+ This guide helps you understand the concept of the SUSE Enterprise Storage with the
+ main focus on managing and administrating the Ceph infrastructure. It also
+ demonstrates how to use Ceph with other related solutions, such as OpenStack
+ or KVM.
+
+
+ Many chapters in this manual contain links to additional documentation
+ resources. These include additional documentation that is available on the
+ system as well as documentation available on the Internet.
+
+
+ For an overview of the documentation available for your product and the
+ latest documentation updates, refer to
+ .
+
+
+ Available Documentation
+
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+
+
+ The following manuals are available for this product:
+
+
+
+
+
+
+
+
+ Guides you through Ceph installation steps and cluster management tasks,
+ including description of basic Ceph cluster structure and related terminology.
+ The guide also introduces steps to integrate Ceph with virtualization solutions
+ such as libvirt, Xen, or KVM, and ways to access objects stored in the cluster via
+ iSCSI and RADOS gateways.
+
+
+ The Best Practice chapter (see )
+ includes selected practical topics sorted by categories, so that you can easily
+ find a solution or more information to a specific problem.
+
+
+
+
+
+
+ HTML versions of the product manuals can be found in the installed system
+ under /usr/share/doc/manual. Additionally, you can
+ access the product-specific manuals as well as upstream documentation from
+ the Help links in the graphical Web interfaces. Find
+ the latest documentation updates at
+ where you can
+ download the manuals for your product in multiple formats.
+
+
+
+ Feedback
+
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+
+ Several feedback channels are available:
+
+
+
+
+
+ Bugs and Enhancement Requests
+
+
+ For services and support options available for your product, refer to
+ .
+
+
+ To report bugs for a product component, log in to the Novell Customer Center from
+ and select
+ My Support Service
+ Request .
+
+
+
+
+ User Comments
+
+
+ We want to hear your comments about and suggestions for this manual and
+ the other documentation included with this product. Use the User
+ Comments feature at the bottom of each page in the online documentation
+ or go to
+
+ and enter your comments there.
+
+
+
+
+
+ Mail
+
+
+ For feedback on the documentation of this product, you can also send a
+ mail to doc-team@suse.de. Make sure to include the
+ document title, the product version, and the publication date of the
+ documentation. To report errors or suggest enhancements, provide a
+ concise description of the problem and refer to the respective section
+ number and page (or URL).
+
+
+
+
+
+
+
+
+ Documentation Conventions
+
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+
+ The following typographical conventions are used in this manual:
+
+
+
+
+
+ /etc/passwd: directory names and file names
+
+
+
+
+ placeholder: replace
+ placeholder with the actual value
+
+
+
+
+ PATH: the environment variable PATH
+
+
+
+
+ ls, : commands, options, and
+ parameters
+
+
+
+
+ user: users or groups
+
+
+
+
+ ,
+ F1 : a key to press or a key combination;
+ keys are shown in uppercase as on a keyboard
+
+
+
+
+ File, File
+ Save As : menu items, buttons
+
+
+
+
+
+ Dancing Penguins (Chapter
+ Penguins, ↑Another Manual): This is a
+ reference to a chapter in another manual.
+
+
+
+
+
+ About the Making of This Manual
+
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+
+ This book is written in Novdoc, a subset of DocBook (see
+ ). The XML source files were
+ validated by xmllint, processed by
+ xsltproc, and converted into XSL-FO using a customized
+ version of Norman Walsh's stylesheets. The final PDF can be formatted
+ through FOP from Apache or through XEP from RenderX. The authoring and
+ publishing tools used to produce this manual are available in the package
+ daps. The DocBook Authoring and
+ Publishing Suite (DAPS) is developed as open source software. For more
+ information, see .
+
+
+
+
+ SUSE Enterprise Storage
+
+ About SUSE Enterprise Storage
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ Introduction
+
+
+ SUSE Enterprise Storage is a distributed storage designed for scalability, reliability and
+ performance based on the Ceph technology. As opposed to conventional
+ systems which have allocation tables to store and fetch data, Ceph uses a
+ pseudo-random data distribution function to store data, which reduces the
+ number of look-ups required in storage. Data is stored on intelligent object
+ storage devices (OSDs) by using daemons, which automates data management
+ tasks such as data distribution, data replication, failure detection and
+ recovery. Ceph is both self-healing and self-managing which results in
+ reduction of administrative and budget overhead.
+
+
+
+ The Ceph storage cluster uses two mandatory types of nodes—monitors
+ and OSD daemons:
+
+
+
+
+ Monitor
+
+
+ Monitoring nodes maintain information about cluster health state, a map
+ of other monitoring nodes and a CRUSH map.
+
+
+ Monitor nodes also keep history of changes performed to the cluster.
+
+
+
+
+ OSD Daemon
+
+
+ An OSD daemon stores data and manages the data replication and
+ rebalancing processes. Each OSD daemon handles one or more OSDs, which
+ can be physical disks/partitions or logical volumes.
+
+
+ OSD daemons also communicate with monitor nodes and provide them with the
+ state of the other OSD daemons.
+
+
+
+
+
+
+ The Ceph storage cluster can use the following optional node types:
+
+
+
+
+ Metadata Server (MDS)
+
+
+ The metadata servers store metadata for the Ceph file system. By using
+ MDS you can execute basic file system commands such as
+ ls without overloading the cluster.
+
+
+
+
+ RADOS Gateway
+
+
+ RADOS Gateway is an HTTP REST gateway for the RADOS object store. You can use
+ this node type also when using the Ceph file system.
+
+
+
+
+
+
+ Each Node Type on a Separate Server
+
+ We strongly recommend to install only one node type on a single server.
+
+
+
+
+ The Ceph environment has the following features:
+
+
+
+
+ Controlled, Scalable, Decentralized Placement of replicated Data using CRUSH
+
+
+ The Ceph system uses a unique map called CRUSH (Controlled Replication
+ Under Scalable Hashing) to assign data to OSDs in an efficient manner.
+ Data assignment offsets are generated as opposed to being looked up in
+ tables. This does away with disk look-ups which come with conventional
+ allocation table based systems, reducing the communication between the
+ storage and the client. The client armed with the CRUSH map and the
+ metadata such as object name and byte offset knows where it can find the
+ data or which OSD it needs to place the data.
+
+
+ CRUSH maintains a hierarchy of devices and the replica placement policy.
+ As new devices are added, data from existing nodes is moved to the new
+ device to improve distribution with regard to workload and resilience. As
+ a part of the replica placement policy, it can add weights to the devices
+ so some devices are more favored as opposed to others. This could be used
+ to give more weights to Solid State Devices (SSDs) and lower weights to
+ conventional rotational hard disks to get overall better performance.
+
+
+ CRUSH is designed to optimally distribute data to make use of available
+ devices efficiently. CRUSH supports different ways of data distribution
+ such as the following:
+
+
+
+
+ n-way replication (mirroring)
+
+
+
+
+ RAID parity schemes
+
+
+
+
+ Erasure Coding
+
+
+
+
+ Hybrid approaches such as RAID-10
+
+
+
+
+
+
+ Reliable Autonomic Distributed Object Storage (RADOS)
+
+
+ The intelligence in the OSD Daemons allows tasks such as data replication
+ and migration for self-management and self-healing automatically. By
+ default, data written to Ceph storage is replicated within the OSDs.
+ The level and type of replication is configurable. In case of failures,
+ the CRUSH map is updated and data is written to new (replicated) OSDs.
+
+
+ The intelligence of OSD Daemons enables to handle data replication, data
+ migration, failure detection and recovery. These tasks are automatically
+ and autonomously managed. This also allows the creation of various pools
+ for different sorts of I/O.
+
+
+
+
+ Replicated Monitor Servers
+
+
+ The monitor servers keep track of all the devices in the system. They
+ manage the CRUSH map which is used to determine where the data needs to
+ be placed. In case of failures of any of the OSDs, the CRUSH map is
+ re-generated and re-distributed to the rest of the system. At a given
+ time, it is recommended that a system contains multiple monitor servers
+ to add redundancy and improve resilience.
+
+
+
+
+
+ Configuration and management framework for your cluster - DeepSea
+
+
+
+ DeepSea is a collection of Salt states, runners and modules for deploying and managing Ceph.
+
+
+
+
+
+
+ Currently the Ceph storage cluster can provide the following services:
+
+
+
+
+
+ Ceph object storage
+
+
+
+
+ Ceph file system
+
+
+
+
+ RADOS block device
+
+
+
+
+
+ Additional Information
+
+
+ Ceph as a community project has its own extensive online documentation.
+ For topics not found in this manual refer to
+ .
+
+
+
+
+ System Requirements
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ Minimal Recommendations per Storage Node
+
+
+
+
+ 2 GB of RAM per each Terabyte of OSD (2‰ of total raw capacity).
+
+
+
+
+ 1.5 GHz of a CPU core per OSD.
+
+
+
+
+ Bonded or redundant 10GbE networks.
+
+
+
+
+ OSD disks in JBOD configurations.
+
+
+
+
+ OSD disks should be exclusively used by SUSE Enterprise Storage.
+
+
+
+
+ Dedicated disk/SSD for the operating system, preferably in a RAID1
+ configuration.
+
+
+
+
+ Additional 4 GB of RAM if cache tiering is used.
+
+
+
+
+
+ Minimal Recommendations per Monitor Node
+
+
+
+
+ 3 SUSE Enterprise Storage monitor nodes recommended.
+
+
+
+
+ 2 GB of RAM per monitor.
+
+
+
+
+ SSD or fast hard disk in a RAID1 configuration
+
+
+
+
+ On installations with fewer than seven nodes, these can be hosted on the
+ system disk of the OSD nodes.
+
+
+
+
+ Nodes should be bare metal, not virtualized, for performance reasons.
+
+
+
+
+ Mixing OSD, monitor, or RADOS Gateway nodes with the actual workload is not
+ supported. No other load generating services other than OSDs, monitors or
+ RADOS Gateways daemons are supported on the same host.
+
+
+
+
+ Configurations may vary from, and frequently exceed, these recommendations
+ depending on individual sizing and performance needs.
+
+
+
+
+ Bonded network interfaces for redundancy.
+
+
+
+
+
+ Minimal Recommendations for RADOS Gateway Nodes
+
+
+ RADOS Gateway nodes should have 6-8 CPU cores and 32 GB of RAM (64 GB recommended).
+
+
+
+ Minimal Recommendations for iSCSI Nodes
+
+
+ iSCSI nodes should have 6-8 CPU cores and 16 GB of RAM.
+
+
+
+ Naming Limitations
+
+
+ Ceph does not generally support non-ASCII characters in configuration
+ files, pool names, user names and so forth. When configuring a Ceph
+ cluster we recommend using only simple alphanumeric characters (A-Z, a-z,
+ 0-9) and minimal punctuation ('.', '-', '_') in all Ceph
+ object/configuration names.
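+   For example, a pool created with a name like the following sticks to the
+   safe character set (the pool name and placement group count are
+   hypothetical):
+
+sudo ceph osd pool create backup_pool_01 128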
+
+
+
+
+
+ Cluster Deployment and Upgrade
+
+ Introduction
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ This chapter outlines procedures to deploy the Ceph cluster. Currently we
+ support the following methods of deployment:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Do Not Mix Installation Methods
+
+   You cannot mix the supported installation methods. For example, if you decide
+   to deploy the cluster with Crowbar, you cannot later make changes to its
+   settings with ceph-deploy, and vice versa.
+
+
+
+
+
+ Deploying with DeepSea and Salt
+
+ Salt along with DeepSea is a stack of components
+ that help you deploy and manage server infrastructure. It is very scalable,
+ fast, and relatively easy to get running. Read the following considerations
+ before you start deploying the cluster with Salt:
+
+
+
+
+   The Salt master is the host that controls the whole cluster
+   deployment. Dedicate all the host's resources to the Salt master services. Do
+   not install Ceph on the host where you want to run the Salt master.
+
+
+
+
+   Salt minions are the nodes controlled by the Salt master. In
+   the Ceph environment, a Salt minion is typically an OSD or monitor node.
+
+
+
+
+   Salt minions need to correctly resolve the Salt master's host name over the
+   network. By default, they look for the salt host
+   name. Therefore, we recommend setting the Salt master's host name to
+   salt; see the sketch after this list.
+
+
+
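+   If renaming the master is not possible, a minimal workaround is to make
+   the name salt resolvable on every minion, for example with an
+   /etc/hosts entry like the following (the IP address is hypothetical).
+   Alternatively, point the minions at the master explicitly in
+   /etc/salt/minion as described later in this chapter.
+
+192.168.100.10   salt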
+
+ Introduction to DeepSea
+
+
+   The goal of DeepSea is to save the administrator time and to make it
+   possible to confidently perform complex operations on a Ceph cluster. This
+   idea has driven a few design choices. Before presenting those choices, some
+   observations are necessary.
+
+
+
+ All software has configuration. Sometimes the default is sufficient. This is
+ not the case with Ceph. Ceph is flexible almost to a fault. Reducing
+ this complexity would force administrators into preconceived configurations.
+ Several of the existing Ceph solutions for an installation create a
+ demonstration cluster of three nodes. However, the most interesting features
+ of Ceph require more.
+
+
+
+ One aspect of configuration management tools is accessing the data such as
+ addresses and device names of the individual servers. For a distributed
+ storage system such as Ceph, that aggregate can run into the hundreds.
+ Collecting the information and entering the data manually into a
+ configuration management tool is prohibitive and error prone.
+
+
+
+ The steps necessary to provision the servers, collect the configuration,
+ configure and deploy Ceph are mostly the same. However, this does not
+ address managing the separate functions. For day to day operations, the
+ ability to trivially add hardware to a given function and remove it
+ gracefully is a requirement.
+
+
+
+   With these observations in mind, DeepSea addresses them with the following
+   strategy: DeepSea consolidates the administrator's decisions in a single location. The
+   decisions revolve around cluster assignment, role assignment and profile
+   assignment. DeepSea also collects each set of tasks into a simple goal. Each goal is a Stage:
+
+
+
+
+
+ Stage 0—the
+   provisioning—this stage is
+   optional, as many sites provide their own provisioning of servers. If
+   you do not have your own provisioning tool, you should run this stage.
+ During this stage all required updates are applied and your system may
+ be rebooted.
+
+
+
+
+ Stage 1—the
+ discovery— here you detect all
+ hardware in your cluster and collect necessary information for the
+ Ceph configuration. For details about configuration refer to
+ .
+
+
+
+
+ Stage 2—the
+ configuration— you need to
+ prepare configuration data in a particular format.
+
+
+
+
+ Stage 3—the
+ deployment— creates a basic
+ Ceph cluster with OSD and monitors.
+
+
+
+
+ Stage 4—the
+ services— additional features of
+ Ceph like iSCSI, RadosGW and CephFS can be installed in this stage.
+ Each is optional.
+
+
+
+
+ Stage 5—the removal stage. This
+ stage is not mandatory and during the initial setup it is usually not
+ needed. In this stage the roles of minions and also the cluster
+   configuration are removed. This stage is usually needed when you need to
+   remove a storage node from your cluster; for details refer to
+   .
+
+
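+   As detailed later in this chapter, each stage corresponds to one
+   salt-run orchestration, so a complete initial deployment is a sequence
+   like the following (stage 0 is optional, and the policy.cfg file is
+   created between stages 1 and 2):
+
+root@master > salt-run state.orch ceph.stage.0
+root@master > salt-run state.orch ceph.stage.1
+root@master > salt-run state.orch ceph.stage.2
+root@master > salt-run state.orch ceph.stage.3
+root@master > salt-run state.orch ceph.stage.4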
+
+
+
+
+
+ Organization and Important Locations
+
+ Salt has several standard locations and several naming conventions used
+ on your master node:
+
+
+
+ /srv/pillar
+
+
+
+ The directory stores configuration data for your cluster minions.
+ Pillar is an interface for providing global
+ configuration values to all your cluster minions.
+
+
+
+
+ /srv/salt/
+
+
+
+ The directory stores Salt state files (also called
+   sls files). State files are formatted descriptions
+   of the states in which the cluster should be. For more information, refer to the
+ Salt
+ documentation.
+
+
+
+
+ /srv/module/runners
+
+
+
+   The directory stores Python scripts known as runners. Runners are
+ executed on the master node.
+
+
+
+
+ /srv/salt/_modules
+
+
+
+   The directory stores Python scripts that are called modules. The modules
+ are applied to all minions in your cluster.
+
+
+
+
+ /srv/pillar/ceph
+
+
+
+ The directory is used by DeepSea. Collected configuration data are
+ stored there.
+
+
+
+
+ /srv/salt/ceph
+
+
+
+   This directory is used by DeepSea. It stores aggregated
+   configuration data that are ready to be used by various Salt commands. The
+   directory stores sls files that can be in different formats, but each
+   subdirectory contains sls files in the same format. For example,
+   /srv/salt/ceph/stage contains orchestration files
+   that are executed by salt-run state.orchestrate.
+
+
+
+
+
+
+
+ Deploying with DeepSea and Salt
+
+
+   The cluster deployment process using Salt has several phases. First,
+   you prepare all nodes of the cluster, and then you deploy and
+   configure Ceph.
+
+
+
+ The following procedure describes the cluster preparation in detail.
+
+
+
+
+
+ Install and register SUSE Linux Enterprise Server 12 SP2 together with SUSE Enterprise Storage 4 extension on each
+ node of the cluster.
+
+
+
+
+   Install DeepSea on the node which you will use as the Salt master:
+
+root@master > zypper in deepsea
+
+   The command also installs the salt-master package.
+
+
+
+
+ Install the package salt-minion on all cluster nodes
+ including the Salt master node.
+
+root # zypper in salt-minion
+
+
+
+ Configure all minions to connect to the master. If your Salt master is not reachable by the DNS name salt, edit the file /etc/salt/minion or create a new file /etc/salt/minion.d/master.conf with the following:
+
+ master: <DNS name of your Salt master>
+
+
+
+ If you performed changes to files mentioned in the previous step, restart the Salt service on all minions:
+
+ root@minion > systemctl restart salt-minion.service
+
+
+
+   Check that the Salt state file
+   /srv/pillar/ceph/master_minion.sls points to your
+   Salt master. If you used the default host name salt
+   for your Salt master in the ses domain, the
+   file looks as follows:
+
+master_minion: salt.ses
+
+
+
+ Restart the Salt service on the master node:
+
+ root@master > systemctl restart salt-master.service
+
+
+
+ Accept all salt keys on the Salt master:
+
+root@master > salt-key --accept-all
+
+
+
+ Verify that the keys have been accepted:
+
+root@master > salt-key --list-all
+
+
+
+
+ Ensure that you have access to Ceph Jewel repositories.
+
+
+
+
+
+   Now you deploy and configure Ceph:
+
+
+
+ Salt Command Conventions
+
+   There are two ways to run salt-run
+   state.orch: one uses stage.<stage
+   number>, the other uses the name of the stage. Both notations
+   have the same effect, and which one you use is entirely a matter of
+   preference.
+
+
+
+
+ Unless specified otherwise, all steps are mandatory.
+
+
+
+
+
+   Now optionally provision your cluster. You can omit this step if you have
+ your own provisioning server.
+
+root@master > salt-run state.orch ceph.stage.0
+
+ or
+
+root@master > salt-run state.orch ceph.stage.prep
+
+
+
+ The discovery stage collects data from all minions and creates
+ configuration fragments that are stored in the directory
+ /srv/pillar/ceph/proposals. The data are stored in
+ the YAML format in sls or yml files.
+
+root@master > salt-run state.orch ceph.stage.1
+
+ or
+
+root@master > salt-run state.orch ceph.stage.discovery
+
+
+
+ After the previous command finishes successfully, create a
+ policy.cfg file in
+ /srv/pillar/ceph/proposals. For details refer to
+ .
+
+
+
+
+ The configuration stage parses the policy.cfg file
+ and merges the included files into their final form. Cluster and role
+ related contents are placed in
+ /srv/pillar/ceph/cluster, while Ceph specific
+ content is placed in /srv/pillar/ceph/stack/default.
+
+
+ Run the following command twice to
+ trigger the configuration stage:
+
+root@master > salt-run state.orch ceph.stage.2
+
+ or
+
+root@master > salt-run state.orch ceph.stage.configure
+
+ The configuration step may take several seconds. After the command
+ finishes, you can view the pillar data for all minions by running:
+
+root@master > salt '*' pillar.items
+
+ Overwriting Defaults
+
+ As soon as the command finishes, you can view the default configuration
+ and change it to suit your needs. For details refer to
+ .
+
+
+
+
+
+   Now run the deployment stage. In this stage the pillar is validated,
+   and monitors and OSD daemons are created on the storage nodes. Run the
+   following to start the stage:
+
+root@master > salt-run state.orch ceph.stage.3
+
+ or
+
+root@master > salt-run state.orch ceph.stage.deploy
+
+
+ The command may take several minutes. If it fails, you have to fix the
+ issue (rerunning of previous stages may be required). After the command
+ succeeds, run the following to check the status:
+
+ceph -s
+
+
+
+ The last step of the Ceph cluster deployment is the
+ services stage. Here you instantiate any of the
+ currently supported services: iSCSI, CephFS, RADOS Gateway and openATTIC. In this
+   stage the necessary pools and authorization keyrings are created, and the
+   services are started. To start the stage, run the following:
+
+root@master > salt-run state.orch ceph.stage.4
+
+ or
+
+root@master > salt-run state.orch ceph.stage.services
+
+   Depending on the setup, the command may run for several minutes.
+
+
+
+
+
+ Configuration
+
+
+ The policy.cfg File
+
+ The /srv/pillar/ceph/proposals/policy.cfg
+ configuration file is used to determine functions of individual cluster
+   nodes (which node acts as an OSD, which is a monitor node, and so on). The file
+ then includes configuration for individual nodes.
+
+
+   Currently the only way to configure the policy is by manually editing the
+ /srv/pillar/ceph/proposals/policy.cfg configuration
+ file. The file is divided into four sections:
+
+
+
+
+ .
+
+
+
+
+ .
+
+
+
+
+ .
+
+
+
+
+ .
+
+
+
+
+ The order of the sections is arbitrary, but the content of included lines
+ overwrites matching keys from the contents of previous lines.
+
+
+ Cluster Assignment
+
+ In the cluster section you select minions
+ for your cluster. You can select all minions, or you can blacklist or
+ whitelist minions. Examples for a cluster called
+   ceph follow.
+
+
+ To include all minions, add the following
+   line:
+
+cluster-ceph/cluster/*.sls
+
+   To whitelist a particular minion:
+
+cluster-ceph/cluster/abc.domain.sls
+
+
+   or a group of minions—you can use shell glob matching:
+
+cluster-ceph/cluster/mon*.sls
+
+   To blacklist one or more minions, set them
+   to unassigned:
+
+cluster-unassigned/cluster/client*.sls
+
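+   A typical cluster section combines both approaches, for example including
+   all minions first and then unassigning the client machines (as noted above,
+   later lines overwrite matching keys from earlier ones; the host name globs
+   are hypothetical):
+
+cluster-ceph/cluster/*.sls
+cluster-unassigned/cluster/client*.sls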
+
+ Role Assignment
+
+   In this section you assign roles
+ to your cluster nodes. The general pattern is the following:
+
+ role-<role name>/<path>/<included files>
+
+ Where the items have the following meaning and values:
+
+
+
+
+ <role name>
+ is any of the following: master, admin, mon, mds, igw
+   or rgw. See the detailed descriptions below.
+
+
+
+
+   <path> is a relative path to sls or yml files. For sls files it is usually cluster, while yml files are located at stack/default/ceph/minions.
+
+
+
+
+   <included files> are the Salt state files or YAML configuration files. Shell globbing can be used for more specific matching.
+
+
+
+
+
+
+ An example for each role follows:
+
+
+
+
+ master - the node has admin keyrings to all Ceph
+   clusters. Currently, only a single Ceph cluster is supported. The master role is mandatory; always add a line similar to the following:
+
+ role-master/cluster/*.sls
+
+
+
+ admin - the minion will have an admin keyring. You define the role as follows:
+
+ role-admin/cluster/*.sls
+
+
+
+ mon - the minion will provide the monitoring
+ service to the Ceph cluster. This role requires addresses of the
+ assigned minions, thus you need to include the files from the
+ stack directory on top of the sls files:
+
+role-mon/stack/default/ceph/minions/mon*.yml
+role-mon/cluster/*.sls
+
+The example assigns the monitoring role to a group of minions.
+
+
+
+
+ mds - the minion will provide the metadata service to
+ support CephFS.
+
+ role-mds/cluster/*.sls
+
+
+
+   igw - the minion will act as an iSCSI gateway. This
+ role requires addresses of the assigned minions, thus you need to
+ also include the files from the stack directory:
+
+role-igw/stack/default/ceph/minions/xyz.domain.yml
+role-igw/cluster/*.sls
+
+
+
+ rgw - the minion will act as a RADOS Gateway:
+
+ role-rgw/cluster/*.sls
+
+
+
+
+ Multiple Roles of Cluster Nodes
+
+   You can assign several roles to a single node. For instance, you can
+   also assign the mds role to two monitor nodes:
+
+role-mds/cluster/mon[12]*.sls
+
+
+
+ Common Configuration
+
+ The common configuration section includes configuration files generated
+ during the discovery (stage 1). These configuration
+ files store parameters like fsid or
+ public_network. To include the required Ceph common
+ configuration, add the following lines:
+
+config/stack/default/global.yml
+config/stack/default/ceph/cluster.yml
+
+
+
+ Profile Assignment
+
+ In Ceph, a single storage role would be insufficient to describe the
+ many disk configurations available with the same hardware. Therefore,
+ stage 1 will generate multiple profiles when possible for the same storage
+ node. The administrator adds the cluster and stack related lines similar
+ to the mon and igw roles.
+
+
+ The profile names begin with profile and end with a
+ single digit. The format is the following:
+
+profile-<label>-<single digit><path to sls or yml files>
+
+ where the items have the following meaning and values:
+
+
+
+
+ <label> is dynamically generated based on quantity,
+ model and size of the media, e.g. 2Disk2GB.
+
+
+
+
+   <single digit> - defines the type of profile and
+   reflects the number of media attached to the minion. When
+   1 is specified, each medium is treated as an
+   individual OSD. When you specify 2, the node has
+   solid state drives (SSD or NVMe) and the solid state media are
+   used as separate journals. Depending on the number of models and the
+   ratio of drives, additional profiles may be created by incrementing the
+   digit.
+
+
+
+
+ <path to sls or yml files> - replace it with a proper
+ path to cluster sls files or to stack yml configuration files.
+
+
+
+
+ Now check the content of yml files in the
+ stack/default/ceph/minions for the specific
+ configuration. Then configure the profiles according to the following
+ examples:
+
+
+ A minion with a single disk called 3HP5588GB:
+
+profile-3HP5588-1/cluster/*.sls
+profile-3HP5588-1/stack/default/ceph/minions/*.yml
+
+ A minion with two disks 2Intel745GB and
+ 6INTEL372GB.
+
+profile-2Intel745GB-6INTEL372GB-2/cluster/*.sls
+profile-2Intel745GB-6INTEL372GB-2/stack/default/ceph/minions/*.yml
+
+   You can add as many lines as needed to define a profile for each
+ storage node:
+
+profile-24HP5588-1/cluster/cold*.sls
+profile-24HP5588-1/stack/default/ceph/minions/cold*.yml
+profile-18HP5588-6INTEL372GB-2/cluster/data*.sls
+profile-18HP5588-6INTEL372GB-2/stack/default/ceph/minions/data*.yml
+
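+   Putting the four sections together, a complete policy.cfg could look like
+   the following sketch (the host name globs and the storage profile are
+   hypothetical and must match the proposals generated on your cluster):
+
+cluster-ceph/cluster/*.sls
+cluster-unassigned/cluster/client*.sls
+role-master/cluster/*.sls
+role-admin/cluster/*.sls
+role-mon/stack/default/ceph/minions/mon*.yml
+role-mon/cluster/mon*.sls
+config/stack/default/global.yml
+config/stack/default/ceph/cluster.yml
+profile-24HP5588-1/cluster/data*.sls
+profile-24HP5588-1/stack/default/ceph/minions/data*.yml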
+
+
+
+ Customizing the Default Configuration
+
+   You can change the default configuration generated in stage 2. The
+   pillar is updated after stage 2, so you can view the current settings
+   by running:
+
+root@master > salt '*' pillar.items
+
+ The output of default configuration for a single minion is usually similar
+ to the following:
+
+----------
+ available_roles:
+ - admin
+ - mon
+ - storage
+ - mds
+ - igw
+ - rgw
+ - client-cephfs
+ - client-radosgw
+ - client-iscsi
+ - mds-nfs
+ - rgw-nfs
+ - master
+ cluster:
+ ceph
+ cluster_network:
+ 172.16.22.0/24
+ fsid:
+ e08ec63c-8268-3f04-bcdb-614921e94342
+ master_minion:
+ admin.ceph
+ mon_host:
+ - 172.16.21.13
+ - 172.16.21.11
+ - 172.16.21.12
+ mon_initial_members:
+ - mon3
+ - mon1
+ - mon2
+ public_address:
+ 172.16.21.11
+ public_network:
+ 172.16.21.0/24
+ roles:
+ - admin
+ - mon
+ - mds
+ time_server:
+ admin.ceph
+ time_service:
+ ntp
+
+   The above-mentioned settings are distributed across several configuration
+   files. The directory structure for these files is defined in the
+   /srv/pillar/ceph/stack/stack.cfg file. The
+   following files usually describe your cluster:
+
+
+
+
+ /srv/pillar/ceph/stack/global.yml - the file affects
+ all minions in the Salt cluster.
+
+
+
+
+ /srv/pillar/ceph/stack/ceph/cluster.yml
+ - the file affects all minions in the Ceph cluster called
+ ceph.
+
+
+
+
+ /srv/pillar/ceph/stack/ceph/roles/role.yml
+ - affects all minions that are assigned the specific role in the
+ ceph cluster.
+
+
+
+
+   /srv/pillar/ceph/stack/ceph/minions/minion
+   ID.yml - affects the individual minion.
+
+
+
+
+ Overwriting Directories with Default Values
+
+ There is a parallel directory tree that stores default configuration setup
+ in /srv/pillar/ceph/stack/default. Do not change
+ values here, as they are overwritten.
+
+
+
+ The typical procedure of changing the collected configuration is the
+ following:
+
+
+
+
+   Find the location of the configuration item you need to change. For
+   example, if you need to change a cluster-related setting such as the
+   cluster network, edit the file
+   /srv/pillar/ceph/stack/ceph/cluster.yml (see the example after this procedure).
+
+
+
+
+ Save the file.
+
+
+
+
+ Verify the changes by running:
+
+root@master > salt '*' saltutil.refresh_pillar
+
+ and then
+
+root@master > salt '*' pillar.items
+
+
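+   For example, to override the cluster network shown in the pillar output
+   above, the file /srv/pillar/ceph/stack/ceph/cluster.yml
+   could contain a line like the following (the subnet is hypothetical):
+
+cluster_network: 172.16.30.0/24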
+
+
+
+
+ Deploying with ceph-deploy
+
+ ceph-deploy is a command line utility to simplify the way
+   you deploy a Ceph cluster in small-scale setups.
+
+
+ Ceph Layout
+
+
+ For testing purposes, a minimal Ceph cluster can be made to run on a
+ single node. However, in a production setup we recommend using at least four
+ nodes: one admin node and three cluster nodes, each running one monitor
+ daemon and some number of object storage daemons (OSDs).
+
+
+
+
+
+
+ Although Ceph nodes can be virtual machines, real hardware is strongly
+ recommended for the production environment.
+
+
+
+
+ Network Recommendations
+
+
+ The network environment where you intend to run Ceph should ideally be a
+ bonded set of at least two network interfaces that is logically split into a
+ public part and trusted internal part using VLANs. The bonding mode is
+ recommended to be 802.3ad when possible to provide maximum bandwidth and
+ resiliency.
+
+
+
+   The public VLAN serves to provide the service to the customers, while the
+   internal part provides the authenticated Ceph network communication.
+   The main reason is that, although Ceph provides authentication and protection
+   against attacks once secret keys are in place, the messages used to
+   configure these keys may be transferred openly and are therefore vulnerable.
+
+
+
+ Nodes Configured via DHCP
+
+ If your storage nodes are configured via DHCP, the default timeouts may not
+ be sufficient for the network to be configured correctly before the various
+ Ceph daemons start. If this happens, the Ceph MONs and OSDs will not
+ start correctly (running systemctl status ceph\* will
+ result in "unable to bind" errors), and Calamari may be unable to display
+ graphs. To avoid this issue, we recommend increasing the DHCP client
+ timeout to at least 30 seconds on each node in your storage cluster. This
+ can be done by changing the following settings on each node:
+
+
+ In /etc/sysconfig/network/dhcp set
+
+DHCLIENT_WAIT_AT_BOOT="30"
+
+ In /etc/sysconfig/network/config set
+
+WAIT_FOR_INTERFACES="60"
+
+
+
+ Preparing Each Ceph Node
+
+
+ Before deploying the Ceph cluster, apply the following steps for each
+ Ceph node as root:
+
+
+
+
+
+ Install SUSE Linux Enterprise 12 SP2 and add the SUSE Enterprise Storage extension.
+
+
+
+ On the Installation Settings screen, click
+ Software. On the Software Selection and System
+ Tasks screen, there are several tasks related to SUSE Enterprise Storage. For
+ OSDs, monitors, or the admin server, be sure to choose SUSE Enterprise Storage server
+ packages and confirm with OK.
+
+
+
+ For more information on the extension installation, see
+ .
+
+
+
+
+ Check the firewall status
+
+sudo /sbin/SuSEfirewall2 status
+
+ and if it is on, either turn it off with
+
+sudo /sbin/SuSEfirewall2 off
+
+ or, if you want to keep it on, enable the appropriate set of ports. You
+ can find detailed information in
+ .
+
+
+
+
+ Make sure that network settings are correct: each Ceph node needs to
+ route to all other Ceph nodes, and each Ceph node needs to resolve all
+ other Ceph nodes by their short host names (without the domain suffix).
+ If these two conditions are not met, Ceph fails.
+
+
+ Calamari Node
+
+ If you plan to deploy the Calamari monitoring and management environment
+ (refer to for more information),
+ each Ceph node needs to reach the Calamari node as well.
+
+
+
+
+
+ Install and set up NTP—the time synchronization tool. We strongly
+ recommend using NTP within the Ceph cluster. The reason is that Ceph
+ daemons pass critical messages to each other, which must be processed
+ before daemons reach a timeout threshold. If the clocks in Ceph monitors
+ are not synchronized, it can lead to a number of anomalies, such as
+ daemons ignoring received messages.
+
+
+   Even with NTP, some clock drift may still be noticeable, but it is usually
+   not harmful.
+
+
+ To install NTP, run the following:
+
+sudo zypper in ntp yast2-ntp-client
+
+ To configure NTP, go to YaST
+ Network Services NTP
+ Configuration. Make sure to enable the NTP service
+ (systemctl enable ntpd.service && systemctl start
+ ntpd.service). Find more detailed information on NTP in the
+
+ SLES Administration Guide.
+
+
+
+
+ Install SSH server. Ceph uses SSH to log in to all cluster nodes. Make
+ sure SSH is installed (zypper in openssh) and enabled
+ (systemctl enable sshd.service && systemctl start
+ sshd.service).
+
+
+
+
+   Add a cephadm user account and set a password for it. The admin node
+   will log in to the Ceph nodes as this particular cephadm user.
+
+sudo useradd -m cephadm && passwd cephadm
+
+
+
+ The admin node needs to have passwordless SSH access to all Ceph nodes.
+ When ceph-deploy logs in to a Ceph node as a
+ cephadm user, this user must have passwordless sudo
+ privileges.
+
+
+ Edit the /etc/sudoers file (with
+   visudo) and add the following line to grant the
+   cephadm user passwordless sudo privileges:
+
+cephadm ALL = (root) NOPASSWD:ALL
+
+ Disable requiretty
+
+ You may receive an error while trying to execute
+ ceph-deploy commands. If requiretty
+ is set by default, disable it by executing sudo visudo
+ and locate the Defaults requiretty setting. Change it
+ to Defaults:cephadm !requiretty to ensure
+ that ceph-deploy can connect using the cephadm user
+ and execute commands with sudo.
+
+
+
+
+
+ On the admin node, become the cephadm user, and enable passwordless SSH
+ access to all other Ceph nodes:
+
+su - cephadm
+cephadm > ssh-keygen
+
+ You will be asked several questions. Leave the values at their defaults,
+ and the passphrase empty.
+
+
+ Copy the key to each Ceph node:
+
+ssh-copy-id cephadm@node1
+ssh-copy-id cephadm@node2
+ssh-copy-id cephadm@node3
+
+ Running ceph-deploy from a Different User Account Than cephadm
+
+ It is possible to run the ceph-deploy command even if
+ you are logged in as a different user than cephadm. For this purpose,
+ you need to set up an SSH alias in your
+ ~/.ssh/config file:
+
+[...]
+Host ceph-node1
+ Hostname ceph-node1
+ User cephadm
+
+ After this change, ssh ceph-node1 automatically uses
+ the cephadm user to log in.
+
+
+
+
+
+
+ Cleaning Previous Ceph Environment
+
+
+ If at any point during the Ceph deployment you run into trouble and need
+ to start over, or you want to make sure that any previous Ceph
+   configuration is removed, execute the following commands as the cephadm user
+ to purge the previous Ceph configuration.
+
+
+
+
+ Be aware that purging previous Ceph installation
+ destroys stored data and access settings.
+
+
+
+cephadm > ceph-deploy purge node1 node2 node3
+cephadm > ceph-deploy purgedata node1 node2 node3
+cephadm > ceph-deploy forgetkeys
+
+
+ Running ceph-deploy
+
+
+ After you prepared each Ceph node as described in
+ , you are ready to deploy
+ Ceph from the admin node with ceph-deploy. Note that
+ ceph-deploy will not successfully install an OSD on disks
+ that have been previously used, unless you first 'zap' them. Be aware that
+ 'zapping' erases the entire disk content:
+
+
+cephadm > ceph-deploy disk zap node:vdb
+
+
+
+
+ Install ceph and ceph-deploy:
+
+sudo zypper in ceph ceph-deploy
+
+
+
+ Disable IPv6. Open /etc/sysctl.conf, edit the
+ following lines, and reboot the admin node:
+
+net.ipv6.conf.all.disable_ipv6 = 1
+net.ipv6.conf.default.disable_ipv6 = 1
+net.ipv6.conf.lo.disable_ipv6 = 1
+
+
+
+ Because it is not recommended to run ceph-deploy as
+ root, become the cephadm user:
+
+su - cephadm
+
+
+
+ Run ceph-deploy to install Ceph on each node:
+
+cephadm > ceph-deploy install node1 node2 node3
+
+
+ ceph-deploy creates important files in the directory
+ where you run it from. It is best to run ceph-deploy
+ in an empty directory.
+
+
+
+
+
+ Set up the monitor nodes. Create keys and local configuration. The keys
+ are used to authenticate and protect the communication between Ceph
+ nodes.
+
+cephadm > ceph-deploy new node1 node2 node3
+
+ During this step, ceph-deploy creates local
+ configuration files. It is recommended to inspect the configuration files
+ in the current directory.
+
+
+ Monitor Nodes on Different Subnets
+
+ If the monitor nodes are not in the same subnet, you need to modify the
+ ceph.conf in the current directory. For example, if
+ the nodes have IP addresses
+
+10.121.9.186
+10.121.10.186
+10.121.11.186
+
+ add the following line to the global section of
+ ceph.conf:
+
+public network = 10.121.0.0/16
+
+ Since you are likely to experience problems with IPv6 networking,
+ consider modifying the IPv6 mon_host settings, as in the following
+ example:
+
+mon_host = [2620:...10:121:9:186,2620:...10:121:10:186,2620:...10:121:11:186]
+
+ into its IPv4 equivalent:
+
+mon_host = 10.121.9.186, 10.121.10.186, 10.121.11.186
+
+
+
+
+ Create the initial monitor service on already created monitor nodes:
+
+cephadm > ceph-deploy mon create-initial
+
+
+
+ Any node from which you need to run Ceph command line tools needs a copy
+ of the admin keyring. To copy the admin keyring to a node or set of nodes,
+ run
+
+cephadm > ceph-deploy admin node1 node2 node3
+
+
+ Because the client.admin's keyring file is readable by
+ root only, you need to use sudo when running the
+ ceph command.
+
+
+
+
+
+ Check the firewall status
+
+sudo /sbin/SuSEfirewall2 status
+
+ and if it is off, check its configuration and turn it on with
+
+sudo /sbin/SuSEfirewall2 on
+
+ You can find detailed information in
+ .
+
+
+
+
+   Create OSD daemons. Although you can use a directory as storage, we
+   recommend dedicating a separate disk to each Ceph node. To find
+   out the name of the disk device, run
+
+cat /proc/partitions
+major minor #blocks name
+
+ 254 0 12582912 vda
+ 254 1 1532928 vda1
+ 254 2 11048960 vda2
+ 11 0 2831360 sr0
+ 254 16 4194304 vdb
+
+ In our case the vdb disk has no partitions, so it
+ is most likely our newly created disk.
+
+
+ Now set up the disk for Ceph:
+
+cephadm > ceph-deploy osd prepare node:vdb
+
+ Using Existing Partitions
+
+ If you need to create OSDs on already existing partitions, you need to
+ set their GUIDs correctly. See
+ for more details.
+
+
+
+
+   If the disk was already used before, add the --zap
+   option.
+
+cephadm > ceph-deploy osd prepare --zap node:vdb
+
+ Be aware that 'zapping' erases the entire disk content.
+
+
+
+
+ Default File System for OSDs
+
+ The default and only supported file system for OSDs is
+ xfs.
+
+
+
+ Optionally, activate the OSD:
+
+cephadm > ceph-deploy osd activate node:vdb1
+
+
+ To join the functionality of ceph-deploy osd prepare
+ and ceph-deploy osd activate, use ceph-deploy
+ osd create.
+
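+   For example, the combined command could look like this (using the same
+   hypothetical node and disk names as above):
+
+cephadm > ceph-deploy osd create node:vdb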
+
+
+
+
+ To test the status of the cluster, run
+
+sudo ceph -k ceph.client.admin.keyring health
+
+
+
+
+ Non-default Cluster Name
+
+ If you need to install the cluster with ceph-deploy
+ using a name other than the default cluster name, you
+   need to initially specify it with --cluster, and then
+ specify it in each ceph-deploy command related to that
+ cluster:
+
+ceph-deploy --cluster my_cluster new [...]
+ceph-deploy --ceph-conf my_cluster.conf mon create-initial
+ceph-deploy --ceph-conf my_cluster.conf osd prepare [...]
+ceph-deploy --ceph-conf my_cluster.conf osd activate [...]
+
+   Note that using a name other than the default cluster name is not supported by
+ SUSE.
+
+
+
+
+
+ Deploying with Crowbar
+
+ Crowbar () is a framework to
+ build complete deployments. It helps you transform groups of bare-metal nodes
+ into an operational cluster within relatively short time.
+
+
+ The deployment process consists of two basic steps: first you need to install
+ and set up the Crowbar admin server, then use it to deploy the available
+ OSD/monitor nodes.
+
+
+ Installing and Setting Up the Crowbar Admin Server
+
+
+   The Crowbar admin server is a stand-alone host with SUSE Linux Enterprise Server 12 SP2 installed, operating
+   in the same network as the Ceph OSD/MON nodes to be deployed. You need to
+   configure the Crowbar admin server so that it provides the software repositories
+   required for Ceph deployment via the TFTP protocol and PXE network boot.
+
+
+
+
+
+ Install and register SUSE Linux Enterprise Server 12 SP2 on the Crowbar admin server. Optionally, you
+ can install and register the SUSE Enterprise Storage 4 extension at the same time. For
+ more information on SUSE Linux Enterprise Server installation, see
+ .
+ For more information on the extensions installation, see
+ .
+
+
+
+ Crowbar admin server does not require any graphical interface. To save the
+ system resources and disk space, it is enough to install the
+ Base System, Minimal System and, if
+ you chose to install the SUSE Enterprise Storage 4 extension, SUSE Enterprise
+ Storage Crowbar patterns from the Software Selection
+ and System Tasks window. If you plan to synchronize
+ repositories (see
+ ) with SMT,
+ add the Subscription Management Tool pattern as well.
+
+
+
+
+
+ Configure network settings for the Crowbar admin server. The server needs
+ to have a static IP address assigned, and the full host name including the
+ domain name specified (for example
+ crowbar-admin.example.com). Check with
+ hostname -f if the host name resolves correctly. The
+ local network where you deploy the cluster needs to have the DHCP server
+ disabled as the Crowbar admin server runs its own.
+
+
+
+   The Crowbar admin server's default IP address is 192.168.124.10. If it is
+   possible to keep that IP address in your network environment, you can save
+   some time on reconfiguring the Crowbar network settings.
+
+
+
+
+
+ Configure NTP to keep the server's time synchronized. See
+
+ for more information on the NTP protocol.
+
+
+
+
+ Make sure that SSH is enabled and started on the server.
+
+
+
+
+ Install and register the SUSE Enterprise Storage 4 extension if you did not install it
+ in step 1. For more information on extension installation, see
+ .
+ Then install the SUSE Enterprise Storage Crowbar
+ pattern in YaST. If you prefer the command line, run sudo
+ zypper in -t pattern ses_admin.
+
+
+
+
+ Mount software repositories required for Ceph nodes deployment with
+ Crowbar. See for
+ more detailed information.
+
+
+
+
+ If you need to further customize the Crowbar admin server settings, refer
+ to the Crowbar Setup chapter of the current
+ SUSE OpenStack Cloud Deployment Guide at
+ .
+
+
+
+
+ Run the following commands to complete the Crowbar admin server setup. The
+ install-ses-admin script outputs a lot of information to
+   the /var/log/crowbar/install.log log file, which can be
+   examined in case of failure. Run it in a
+   screen session for safety reasons, as the
+   network will be reconfigured during its run and interruptions may occur.
+
+sudo systemctl start crowbar-init
+sudo crowbarctl database create
+screen install-ses-admin
+
+ Be patient as the script takes several minutes to finish.
+
+
+
+
+ After the script successfully finishes, you can view the Crowbar admin
+ server Web UI by pointing your Web browser to the Crowbar admin server IP
+ address (http://192.168.124.10 by default).
+
+
+
+
+
+ Prepare Software Repositories
+
+   The Crowbar admin server needs to provide several software repositories so that
+ the Ceph nodes can install required packages from them on PXE boot. These
+ repositories need to be mounted/synchronized under
+ /srv/tftpboot/suse-12.2. The following description is
+ based on the AMD64/Intel 64 architecture.
+
+
+ Synchronizing Repositories
+
+ There are several ways to provide the content in the repository
+ directories. You can, for example, run your local SMT instance,
+ synchronize the repositories, and then export them via NFS and mount them
+ on the Crowbar admin server.
+
+
+
+
+ /srv/tftpboot/suse-12.2/x86_64/install
+
+
+ This directory needs to contain the contents of the SUSE Linux Enterprise Server 12 SP2 DVD disc
+ #1. Ceph nodes need it for the base SUSE Linux Enterprise Server 12 SP2 installation. You can
+ either mount the downloaded .iso image as a loop device, or copy its
+ content with rsync.
+
+
+
+
+ /srv/tftpboot/suse-12.2/x86_64/repos/SLES12-SP2-Pool
+
+
+ Base software repository for SUSE Linux Enterprise Server 12 SP2.
+
+
+
+
+ /srv/tftpboot/suse-12.2/x86_64/repos/SLES12-SP2-Updates
+
+
+ Repository containing updates for SUSE Linux Enterprise Server 12 SP2.
+
+
+
+
+ /srv/tftpboot/suse-12.2/x86_64/repos/SUSE-Enterprise-Storage-4-Pool
+
+
+ Base software repository for SES 4.
+
+
+
+
+ /srv/tftpboot/suse-12.2/x86_64/repos/SUSE-Enterprise-Storage-4-Updates
+
+
+ Repository containing updates for SES 4.
+
+
+
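+   For example, the installation media and one of the update repositories
+   could be provided as follows (the ISO file name, the SMT host, and its
+   export path are hypothetical):
+
+sudo mount -o loop SLE-12-SP2-Server-DVD-x86_64-DVD1.iso \
+  /srv/tftpboot/suse-12.2/x86_64/install
+sudo mount -t nfs smt.example.com:/srv/smt/repos/SLES12-SP2-Updates \
+  /srv/tftpboot/suse-12.2/x86_64/repos/SLES12-SP2-Updates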
+
+
+
+
+ Deploying the Ceph Nodes
+
+
+ The Crowbar Web interface runs on the Administration Server. It provides an overview of the most
+ important deployment details, including a view on the nodes and which roles
+ are deployed on which nodes, and on the barclamp proposals that can be edited
+ and deployed. In addition, the Crowbar Web interface shows details about the networks
+ and switches in your cluster. It also provides graphical access to some
+ tools with which you can manage your repositories, back up or restore the
+ Administration Server, export the Chef configuration, or generate a
+ supportconfig TAR archive with the most important log
+ files.
+
+
+
+ Logging In
+
+ The Crowbar Web interface uses the HTTP protocol and port 80.
+
+
+ Logging In to the Crowbar Web Interface
+
+
+ On any machine, start a Web browser and make sure that JavaScript and
+ cookies are enabled.
+
+
+
+
+ As URL, enter the IP address of the Administration Server, for example:
+
+http://192.168.124.10/
+
+
+
+ Log in as user crowbar. If you
+ have not changed the password, it is crowbar by
+ default.
+
+
+
+
+
+ Changing the Password for the Crowbar Web Interface
+
+
+ After being logged in to the Crowbar Web interface, select
+ Barclamp Crowbar .
+
+
+
+
+ Select the Crowbar barclamp entry and
+ Edit the proposal.
+
+
+
+
+ In the Attributes section, click
+ Raw to edit the configuration file.
+
+
+
+
+ Search for the following entry:
+
+"crowbar": {
+ "password": "crowbar"
+
+
+
+ Change the password.
+
+
+
+
+ Confirm your change by clicking Save and
+ Apply.
+
+
+
+
+
+
+ Node Installation
+
+ The Ceph nodes represent the actual cluster infrastructure. Node
+ installation and service deployment is done automatically from the
+ Administration Server. Before deploying the Ceph service, SUSE Linux Enterprise Server 12 SP2 will be installed
+ on all nodes.
+
+
+ To install a node, you need to boot it first using PXE. It will be booted
+ with an image that enables the Administration Server to discover the node and make it
+ available for installation. When you have allocated the node, it will boot
+ using PXE again and the automatic installation will start.
+
+
+
+
+ Boot all nodes that you want to deploy using PXE. The nodes will boot
+ into the SLEShammer
image, which performs the initial
+ hardware discovery.
+
+
+ Limit the Number of Concurrent Boots using PXE
+
+ Booting many nodes using PXE at the same time will cause heavy load on
+ the TFTP server, because all nodes will request the boot image at the
+ same time. It is recommended to boot the nodes time-delayed.
+
+
+
+
+
+ Open a browser and point it to the Crowbar Web interface on the Administration Server,
+ for example http://192.168.124.10/. Log in as user
+ crowbar. The password is
+ crowbar by default, if you have not changed it.
+
+
+ Click Nodes Dashboard
+ to open the Node Dashboard.
+
+
+
+
+ Each node that has successfully booted will be listed as being in state
+ Discovered, indicated by a yellow bullet. The nodes
+ will be listed with their MAC address as a name. Wait until all nodes are
+ listed as being Discovered before proceeding. In case
+ a node does not report as being Discovered, it may
+ need to be rebooted manually.
+
+
+
+
+
+ Although this step is optional, it is recommended to properly group your
+ nodes at this stage, since it lets you clearly arrange all nodes.
+ Grouping the nodes by role would be one option, for example monitor nodes
+ and OSD nodes.
+
+
+
+
+ Enter the name of a new group into the New Group
+ text box and click Add Group.
+
+
+
+
+ Drag and drop a node onto the title of the newly created group. Repeat
+ this step for each node you want to put into the group.
+
+
+
+
+
+
+
+ To allocate all nodes, click Nodes
+ Bulk Edit. To allocate a single node,
+ click the name of a node, then click Edit.
+
+
+
+ Limit the Number of Concurrent Node Deployments
+
+ Deploying many nodes in bulk mode will cause heavy load on the
+ Administration Server. The subsequent concurrent Chef client runs triggered by the
+ nodes will require a lot of RAM on the Administration Server.
+
+
+ Therefore it is recommended to limit the number of concurrent
+ Allocations
in bulk mode. The maximum number depends on
+ the amount of RAM on the Administration Server—limiting concurrent deployments
+   to between five and ten is recommended.
+
+
+
+
+
+ In single node editing mode, you can also specify the Filesystem
+ Type for the node. By default, it is set to
+ ext4 for all nodes. It is recommended to keep this
+ default.
+
+
+
+
+ Provide a meaningful Alias, Public
+ Name and a Description for each node and
+ check the Allocate box. You can also specify the
+ Intended Role for the node. This optional setting is
+ used to make reasonable proposals for the barclamps.
+
+
+ By default Target Platform is set to SLES 12
+ SP2.
+
+
+ Alias Names
+
+ Providing an alias name will change the default node names (MAC address)
+ to the name you provided, making it easier to identify the node.
+ Furthermore, this alias will also be used as a DNS
+ CNAME for the node in the admin network. As a result,
+ you can access the node via this alias when, for example, logging in via
+ SSH.
+
+
+
+
+
+
+ When you have filled in the data for all nodes, click
+ Save. The nodes will reboot and commence the
+ AutoYaST-based SUSE Linux Enterprise Server installation (or installation of other target platforms,
+ if selected) via a second boot using PXE. Click
+ Nodes Dashboard to
+ return to the Node Dashboard.
+
+
+
+
+ Nodes that are being installed are listed with the status
+ Installing (yellow/green bullet). When the
+ installation of a node has finished, it is listed as being
+ Ready, indicated by a green bullet. Wait until all
+ nodes are listed as being Ready before proceeding.
+
+
+
+
+
+
+
+ Barclamps
+
+ The Ceph service is automatically installed on the nodes by using
+ so-called barclamps—a set of recipes, templates, and installation
+ instructions. A barclamp is configured via a so-called proposal. A proposal
+ contains the configuration of the service(s) associated with the barclamp
+ and a list of machines onto which the barclamp should be deployed.
+
+
+ All existing barclamps can be accessed from the Crowbar Web interface by
+ clicking Barclamps. To create or edit barclamp proposals
+ and deploy them, proceed as follows:
+
+
+
+
+ Open a browser and point it to the Crowbar Web interface available on the
+ Administration Server, for example http://192.168.124.10/. Log in
+ as user crowbar. The password
+ is crowbar by default, if you have not changed it.
+
+
+ Click Barclamps to open the All
+ Barclamps menu. Alternatively you may filter the list to
+ Crowbar or SUSE Enterprise Storage
+ barclamps by choosing the respective option from
+ Barclamps. The Crowbar barclamps
+ contain general recipes for setting up and configuring all nodes, while
+ the SUSE Enterprise Storage barclamps are dedicated to
+ Ceph service deployment and configuration.
+
+
+
+
+ You can either Create a proposal or
+ Edit an existing one.
+
+
+ Most Ceph barclamps consist of two sections: the
+ Attributes section lets you change the configuration,
+ and the Node Deployment section lets you choose onto
+ which nodes to deploy the barclamp.
+
+
+
+
+ To edit the Attributes section, change the values via
+ the Web form. Alternatively you can directly edit the configuration file
+ by clicking Raw.
+
+
+ Raw Mode
+
+ If you switch between Raw mode and Web form
+ (Custom mode), make sure to Save
+ your changes before switching, otherwise they will be lost.
+
+
+
+
+
+ To assign nodes to a role, use the Deployment section
+ of the barclamp. It shows the Available Nodes that you
+ can assign to the roles belonging to the barclamp.
+
+
+ One or more nodes are usually automatically pre-selected for available
+ roles. If this pre-selection does not meet your requirements, click the
+ Remove icon next to the role to remove the assignment.
+ Assign a node or cluster of your choice by selecting the respective entry
+ from the list of Available Nodes, Available
+ Clusters, or Available Clusters with Remote
+ Nodes. Drag it to the desired role and drop it onto the
+ role name. Do not drop a node
+ or cluster onto the text box—this is used to filter the list of
+ available nodes or clusters!
+
+
+
+
+ To save and deploy your edits, click Apply. To save
+ your changes without deploying them, click Save. To
+ remove the complete proposal, click Delete. A proposal
+ that already has been deployed can only be deleted manually, see
+ for details.
+
+
+ If you deploy a proposal onto a node where a previous one is still
+ active, the new proposal will overwrite the old one.
+
+
+ Wait Until a Proposal has been Deployed
+
+ Deploying a proposal might take some time (up to several minutes). It is
+ strongly recommended to always wait until you see the note
+ Successfully applied the proposal
before proceeding on to
+ the next proposal.
+
+
+
+
+
+ Delete a Proposal That Already Has Been Deployed
+
+ To delete a proposal that already has been deployed, you first need to
+ Deactivate it in the Crowbar Web interface. Deactivating
+ a proposal removes the chef role from the nodes, so the routine that
+ installed and set up the services is not executed anymore. After a
+ proposal has been deactivated, you can Delete it in the
+ Crowbar Web interface to remove the barclamp configuration data from the
+ server.
+
+
+   Deactivating and deleting a barclamp that has already been deployed does
+ not remove packages installed when the barclamp was
+ deployed. Nor does it stop any services that were started during the
+ barclamp deployment. To undo the deployment on the affected node, you need
+ to stop (systemctl stop
+ service) the respective services and
+ disable (systemctl disable
+ service) them. Uninstalling packages
+ should not be necessary.
+
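+   For example, on a node that only ran Ceph services, the cleanup could
+   look like the following sketch (the exact unit names depend on the roles
+   that were deployed; ceph.target covers all Ceph services on the node):
+
+sudo systemctl stop ceph.target
+sudo systemctl disable ceph.target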
+
+
+ Queuing/Dequeuing Proposals
+
+   When a proposal is applied to one or more nodes that are not yet available
+   for deployment (for example because they are rebooting or have not yet been
+   fully installed), the proposal will be put in a queue. A message like
+
+Successfully queued the proposal until the following become ready: d52-54-00-6c-25-44
+
+ will be shown when having applied the proposal. A new button
+ Dequeue will also become available. Use it to cancel
+ the deployment of the proposal by removing it from the queue.
+
+
+
+
+
+ Deploying Ceph
+
+ For Ceph at least four nodes are required. If deploying the optional
+ Calamari server for Ceph management and monitoring, an additional node is
+ required.
+
+
+ The Ceph barclamp has the following configuration options:
+
+
+
+ Disk Selection Method
+
+
+
+ Choose whether to only use the first available disk or all available
+ disks. Available disks
are all disks currently not used
+ by the system. Note that one disk (usually
+ /dev/sda) of every block storage node is already
+ used for the operating system and is not available for Ceph.
+
+
+
+
+ Number of Replicas of an Object
+
+
+
+ For data security, stored objects are not only stored once, but
+ redundantly. Specify the number of copies that should be stored for each
+ object with this setting. The number includes the object itself. If you
+ for example want the object plus two copies, specify 3.
+
+
+
+
+ SSL Support for RadosGW
+
+
+
+ Choose whether to encrypt public communication
+ (HTTPS) or not (HTTP). If choosing
+ HTTPS, you need to specify the locations for the
+ certificate key pair files.
+
+
+
+
+ Calamari Credentials
+
+
+
+ Calamari is a Web front-end for managing and analyzing the Ceph
+ cluster. Provide administrator credentials (user name, password, e-mail
+   address) in this section. When Ceph has been deployed, you can log in to
+ Calamari with these credentials. Deploying Calamari is
+ optional—leave these text boxes empty when not deploying Calamari.
+
+
+
+
+
+
+ The Ceph service consists of the following different roles:
+
+
+
+ ceph-osd
+
+
+
+ The virtual block storage service. Install this role on all dedicated
+ Ceph Storage Nodes (at least three), but not on any other node.
+
+
+
+
+ ceph-mon
+
+
+
+ Cluster monitor daemon for the Ceph distributed file system.
+ ceph-mon needs to be installed on three or five
+ dedicated nodes.
+
+
+
+
+ ceph-calamari
+
+
+
+ Sets up the Calamari Web interface which lets you manage the Ceph
+ cluster. Deploying it is optional. The Web interface can be accessed via
+ http://IP-ADDRESS/ (where
+ IP-ADDRESS is the address of the machine
+ where ceph-calamari is deployed on).
+ ceph-calamari needs to be installed on a dedicated
+ node—it is not possible to install it on a
+   node running other services.
+
+
+
+
+ ceph-radosgw
+
+
+
+ The HTTP REST gateway for Ceph. Install it on a dedicated node.
+
+
+
+
+ ceph-mds
+
+
+
+ The metadata server daemon for the CephFS. Install it on a dedicated
+ node. For more information on CephFS refer to
+ .
+
+
+
+
+
+
+
+
+
+ Upgrading from Previous Releases
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ This chapter introduces steps to upgrade SUSE Enterprise Storage from the previous
+ release(s) to the current one.
+
+
+ General Upgrade Procedure
+
+
+ Before upgrading the Ceph cluster, you need to have both the underlying
+ SUSE Linux Enterprise Server and SUSE Enterprise Storage correctly registered against SCC or SMT. You can upgrade
+ daemons in your cluster while the cluster is online and in service. Certain
+ types of daemons depend upon others. For example Ceph RADOS Gateways depend upon
+ Ceph monitors and Ceph OSD daemons. We recommend upgrading in this
+ order:
+
+
+
+
+
+ Admin node (if you deployed the cluster using the admin node).
+
+
+
+
+ Ceph monitors.
+
+
+
+
+ Ceph OSD daemons.
+
+
+
+
+ Ceph RADOS Gateways.
+
+
+
+
+
+
+ We recommend upgrading all the daemons of a specific type—for example
+ all monitor daemons or all OSD daemons—one by one to ensure that they
+ are all on the same release. We also recommend that you upgrade all the
+ daemons in your cluster before you try to exercise new functionality in a
+ release.
+
+
+ After all the daemons of a specific type are upgraded, check their status.
+
+
+ Ensure each monitor has rejoined the quorum after all monitors are
+ upgraded:
+
+ceph mon stat
+
+ Ensure each Ceph OSD daemon has rejoined the cluster after all OSDs are
+ upgraded:
+
+ceph osd stat
+
+
+
+ Upgrade from SUSE Enterprise Storage 2.1/3 to 4
+
+
+ This section includes general tasks when upgrading from SUSE Enterprise Storage 2.1/3 to 4.
+
+
+
+ Software Requirements
+
+ You need to have the following software installed and updated to the latest
+   package versions on all the Ceph nodes you want to upgrade before you
+ can start with the upgrade procedure:
+
+
+
+
+ SUSE Linux Enterprise Server 12 SP1
+
+
+
+
+ SUSE Enterprise Storage 2.1 or 3
+
+
+
+
+
+
+ To upgrade the SUSE Enterprise Storage 2.1 or 3 cluster to version 4, follow these steps
+ on each cluster node:
+
+
+
+
+
+ Do Not Run zypper dup or Reboot the Node
+
+ After you prepare for the upgrade to SUSE Linux Enterprise Server 12 SP2 as suggested later in
+ this step, do not run zypper dup
+ or reboot the node as its Ceph related services may not start
+ correctly.
+
+
+
+ Upgrade the current SUSE Linux Enterprise Server to version 12 SP2. Refer to
+
+ for more information on supported upgrade methods.
+
+
+
+
+ List all the active services with zypper ls.
+
+zypper ls
+#| Alias | Name | Enabled | Refresh | Type
+-+--------------------------------------------+------+---------+---------+------
+1| SUSE_Enterprise_Storage_3_x86_64 | ... | Yes | Yes | ris
+2| SUSE_Linux_Enterprise_Server_12_SP2_x86_64 | ... | Yes | Yes | ris
+[...]
+
+ Verify that services related to SUSE Linux Enterprise Server 12 SP2 are present and enabled.
+
+
+
+
+ Remove the current SUSE Enterprise Storage service. You can do it as follows:
+
+sudo zypper rs ID
+
+
+
+ Activate SUSE Enterprise Storage 4 service. You can use yast2 add-on.
+
+
+
+
+ Refresh new software repositories:
+
+sudo zypper ref
+
+
+
+ Install the upgrade helper package:
+
+sudo zypper in ses-upgrade-helper
+
+
+
+ Run the upgrade script:
+
+sudo upgrade-ses.sh
+
+ The script does the distribution upgrade of the node. After reboot, the
+ node comes up with SUSE Linux Enterprise Server 12 SP2 and SUSE Enterprise Storage 4 running.
+
+
+
+
+   Check that the directory /var/log/ceph is owned by the ceph user. If not, change the ownership:
+
+ sudo chown ceph.ceph /var/log/ceph
+
+
+
+
+ iSCSI Gateways Upgrade
+
+ For iSCSI gateways, consider the following points during upgrades:
+
+
+
+
+ Check that the lrbd service is enabled so that
+ the iSCSI gateway configuration is applied on reboot.
+
+
+
+
+ Upgrade the iSCSI gateway nodes after the monitor and OSD nodes.
+
+
+
+ If an iSCSI gateway includes OSD or monitor processes on the same node,
+ then upgrade and restart these processes before the system is rebooted
+ into the new kernel.
+
+
+
+
+
+ Check that the Ceph cluster health is HEALTH_OK when
+ proceeding with the iSCSI gateway upgrade.
+
+
+
+
+ iSCSI initiators (clients) that require access to storage throughout the
+ upgrade need to be configured with multi-path I/O (MPIO).
+
+
+
+
+ Before rebooting or taking an iSCSI gateway node offline, manually
+ disable the corresponding initiator MPIO device paths on the client.
+
+
+
+
+ Once the gateway is back online, enable the client MPIO device path(s).
+
+
+
+
+ For all gateway nodes exposing a given iSCSI target, care should be
+ taken to ensure that no more than one iSCSI gateway node is offline
+ (rebooted for kernel update) at any moment during the upgrade.
+
+
+
+
+
+
+ Updated Behavior for iSCSI Gateways
+
+
+
+ The is a backward compatibility option for
+ setting the backstore name to only use the name of the image. Starting
+ from SUSE Enterprise Storage 3, the default uses
+
+pool_name-image_name
+
+ Do not use this option for new installations.
+
+"pools": [
+ {
+ "pool": "rbd",
+ "gateways": [
+ {
+ "host": "igw1",
+ "tpg": [
+ {
+ "image": "archive",
+ "rbd_name": "simple"
+ }
+ ]
+ }
+ ]
+ }
+]
+
+
+
+ Likewise, will use the original scheme of
+ target and image name for setting the .
+ The current default uses
+
+pool_name-target-image_name
+
+ Do not use this option for new installations.
+
+"targets": [
+ {
+ "host": "igw1",
+ "target": "iqn.2003-01.org.linux-iscsi.generic.x86:sn.abcdefghijk",
+ "wwn_generate": "original"
+ }
+
+ For more information, see
+ /usr/share/doc/lrbd/README.migration.
+
+
+
+
+ The is deprecated for federated
+ RADOS Gateway deployments in SUSE Enterprise Storage 3. Replace it with the new
+ option in
+ ceph.conf
+
+
+
+
+
+
+
+ Upgrade from SUSE Enterprise Storage 2.1 to 4
+
+ This section includes tasks specific to upgrading SUSE Enterprise Storage 2.1 to 4.
+
+
+ Set osdmap Flag
+
+ When the last OSD is upgraded from SUSE Enterprise Storage 2.1 to 4, the monitor nodes will
+ detect that all OSDs are running jewel and complain that
+   the require_jewel_osds flag is not set. You need to
+   set this flag manually to acknowledge that, once the cluster has been
+   upgraded to jewel, it cannot be downgraded to
+   hammer. Set the flag by running the following command:
+
+ sudo ceph osd set require_jewel_osds
+
+ After the command completes, the warning disappears.
+
+
+ On fresh installs of SUSE Enterprise Storage 4, this flag is set automatically when the
+ Ceph monitors create the initial osdmap, so no end-user action is needed in this
+ case.
+
+
+
+
+
+
+ Operating a Cluster
+
+ Introduction
+
+ In this part of the manual you will learn how to start or stop Ceph
+ services, how to monitor a cluster's state, how to use and modify CRUSH maps,
+ and how to manage storage pools.
+
+
+ It also includes advanced topics, for example how to manage users and
+ authentication in general, how to manage pool and RADOS device snapshots, how
+ to set up erasure coded pools, or how to increase the cluster performance
+ with cache tiering.
+
+
+
+ Operating Ceph Services
+
+ Ceph related services are operated with the systemctl
+ command. The operation takes place on the node you are currently logged in
+ to. You need to have root privileges to be able to operate on Ceph
+ services.
+
+
+ Starting, Stopping, and Restarting Services using Targets
+
+
+ To facilitate starting, stopping, and restarting all the services of a
+ particular type (for example all Ceph services, or all MONs, or all OSDs)
+ on a node, Ceph provides the following systemd unit files:
+
+
+ceph.target
+ceph-osd.target
+ceph-mon.target
+ceph-mds.target
+ceph-radosgw.target
+ceph-rbd-mirror.target
+
+
+ To start/stop/restart all Ceph services on the node, run:
+
+
+systemctl stop ceph.target
+systemctl start ceph.target
+systemctl restart ceph.target
+
+
+ To start/stop/restart all OSDs on the node, run:
+
+
+systemctl stop ceph-osd.target
+systemctl start ceph-osd.target
+systemctl restart ceph-osd.target
+
+
+ Commands for the other targets are analogous.
+
+
+
+ Starting, Stopping, and Restarting Individual Services
+
+
+ You can operate individual services using the following parametrized
+ systemd unit files:
+
+
+ceph-osd@.service
+ceph-mon@.service
+ceph-mds@.service
+ceph-radosgw@.service
+ceph-rbd-mirror@.service
+
+
+ To use these commands, you first need to identify the name of the service
+ you want to operate. See
+ to learn more about
+ services identification.
+
+
+
+ To start/stop/restart the osd.1 service, run:
+
+
+systemctl stop ceph-osd@1.service
+systemctl start ceph-osd@1.service
+systemctl restart ceph-osd@1.service
+
+
+ Commands for the other service types are analogous.
+
+
+
+ Identifying Individual Services
+
+
+ You can find out the names/numbers of a particular type of service by
+ running systemctl and filtering the results with the
+ grep command. For example:
+
+
+systemctl | grep -i 'ceph-osd.*service'
+systemctl | grep -i 'ceph-mon.*service'
+[...]
+
+
+ Service Status
+
+
+ You can query systemd for the status of services. For example:
+
+
+systemctl status ceph-osd@1.service
+systemctl status ceph-mon@vanguard2.service
+
+
+ If you do not know the exact name/number of the service, see
+ .
+
+
+
+
+ Determining Cluster State
+
+ Once you have a running cluster, you may use the ceph tool
+ to monitor your cluster. Determining cluster state typically involves
+ checking OSD status, monitor status, placement group status and metadata
+ server status.
+
+
+ Interactive Mode
+
+ To run the ceph tool in an interactive mode, type
+ ceph at the command line with no arguments. The
+ interactive mode is more convenient if you are going to enter more
+ ceph commands in a row. For example:
+
+ceph
+ceph> health
+ceph> status
+ceph> quorum_status
+ceph> mon_status
+
+
+ Checking Cluster Health
+
+
+ After you start your cluster, and before you start reading and/or writing
+ data, check your cluster’s health first. You can check on the health of
+ your Ceph cluster with the following:
+
+
+ceph health
+HEALTH_WARN 10 pgs degraded; 100 pgs stuck unclean; 1 mons down, quorum 0,2 \
+node-1,node-2,node-3
+
+
+ If you specified non-default locations for your configuration or keyring,
+ you may specify their locations:
+
+
+ceph -c /path/to/conf -k /path/to/keyring health
+
+
+ Upon starting the Ceph cluster, you will likely encounter a health warning
+ such as HEALTH_WARN XXX num placement groups stale. Wait
+ a few moments and check it again. When your cluster is ready, ceph
+ health should return a message such as
+ HEALTH_OK. At that point, it is okay to begin using the
+ cluster.
+
+
+
+ Watching a Cluster
+
+
+ To watch the cluster’s ongoing events, open a new terminal and enter:
+
+
+ceph -w
+
+
+ Ceph will print each event. For example, a tiny Ceph cluster consisting
+ of one monitor and two OSDs may print the following:
+
+
+cluster b370a29d-9287-4ca3-ab57-3d824f65e339
+ health HEALTH_OK
+ monmap e1: 1 mons at {ceph1=10.0.0.8:6789/0}, election epoch 2, quorum 0 ceph1
+ osdmap e63: 2 osds: 2 up, 2 in
+ pgmap v41338: 952 pgs, 20 pools, 17130 MB data, 2199 objects
+ 115 GB used, 167 GB / 297 GB avail
+ 952 active+clean
+
+2014-06-02 15:45:21.655871 osd.0 [INF] 17.71 deep-scrub ok
+2014-06-02 15:45:47.880608 osd.1 [INF] 1.0 scrub ok
+2014-06-02 15:45:48.865375 osd.1 [INF] 1.3 scrub ok
+2014-06-02 15:45:50.866479 osd.1 [INF] 1.4 scrub ok
+[...]
+2014-06-02 15:45:55.720929 mon.0 [INF] pgmap v41343: 952 pgs: \
+ 1 active+clean+scrubbing+deep, 951 active+clean; 17130 MB data, 115 GB used, \
+ 167 GB / 297 GB avail
+
+
+ The output provides the following information:
+
+
+
+
+
+ Cluster ID
+
+
+
+
+ Cluster health status
+
+
+
+
+ The monitor map epoch and the status of the monitor quorum
+
+
+
+
+ The OSD map epoch and the status of OSDs
+
+
+
+
+ The placement group map version
+
+
+
+
+ The number of placement groups and pools
+
+
+
+
+ The notional amount of data stored and the number of
+ objects stored; and,
+
+
+
+
+ The total amount of data stored.
+
+
+
+
+
+ How Ceph Calculates Data Usage
+
+ The used value reflects the actual amount of raw storage
+ used. The xxx GB / xxx GB value means the amount
+ available (the lesser number) of the overall storage capacity of the
+ cluster. The notional number reflects the size of the stored data before it
+ is replicated, cloned or snapshotted. Therefore, the amount of data
+ actually stored typically exceeds the notional amount stored, because
+ Ceph creates replicas of the data and may also use storage capacity for
+ cloning and snapshotting.
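+
+ As a minimal worked example with hypothetical numbers: on a cluster whose
+ pools keep three replicas of each object (size=3), storing 10 GB of data
+ results in a notional usage of 10 GB, while the raw used value grows by
+ roughly three times that amount:
+
+# hypothetical illustration, ignoring journal and file system overhead
+# notional data stored : 10 GB
+# replicas per object  : 3
+# raw storage consumed : 10 GB x 3 = 30 GB (approximately)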
+
+
+
+
+ Checking a Cluster’s Usage Stats
+
+
+ To check a cluster’s data usage and data distribution among pools, you can
+ use the df option. It is similar to Linux
+ df. Execute the following:
+
+
+ceph df
+GLOBAL:
+ SIZE AVAIL RAW USED %RAW USED
+ 27570M 27304M 266M 0.97
+POOLS:
+ NAME ID USED %USED MAX AVAIL OBJECTS
+ data 0 120 0 5064M 4
+ metadata 1 0 0 5064M 0
+ rbd 2 0 0 5064M 0
+ hot-storage 4 134 0 4033M 2
+ cold-storage 5 227k 0 5064M 1
+ pool1 6 0 0 5064M 0
+
+
+ The GLOBAL section of the output provides an overview of
+ the amount of storage your cluster uses for your data.
+
+
+
+
+
+ SIZE: The overall storage capacity of the cluster.
+
+
+
+
+ AVAIL: The amount of free space available in the
+ cluster.
+
+
+
+
+ RAW USED: The amount of raw storage used.
+
+
+
+
+ % RAW USED: The percentage of raw storage used. Use
+ this number in conjunction with the full ratio and
+ near full ratio to ensure that you are not reaching
+ your cluster’s capacity. See
+ Storage
+ Capacity for additional details.
+
+
+
+
+
+ The POOLS section of the output provides a list of pools
+ and the notional usage of each pool. The output from this section
+ does not reflect replicas, clones or snapshots. For
+ example, if you store an object with 1MB of data, the notional usage will be
+ 1MB, but the actual usage may be 2MB or more depending on the number of
+ replicas, clones and snapshots.
+
+
+
+
+
+ NAME: The name of the pool.
+
+
+
+
+ ID: The pool ID.
+
+
+
+
+ USED: The notional amount of data stored in kilobytes,
+ unless the value is suffixed with M for megabytes or G for gigabytes.
+
+
+
+
+ %USED: The notional percentage of storage used per
+ pool.
+
+
+
+
+ OBJECTS: The notional number of objects stored per
+ pool.
+
+
+
+
+
+
+ The numbers in the POOLS section are notional. They are not inclusive of
+ the number of replicas, snapshots or clones. As a result, the sum of the
+ USED and %USED amounts will not add up to the RAW USED and %RAW USED
+ amounts in the GLOBAL section of the output.
+
+
+
+
+ Checking a Cluster’s Status
+
+
+ To check a cluster’s status, execute the following:
+
+
+ceph status
+
+
+ or
+
+
+ceph -s
+
+
+ In interactive mode, type status and press
+ .
+
+
+ceph> status
+
+
+ Ceph will print the cluster status. For example, a tiny Ceph cluster
+ consisting of one monitor and two OSDs may print the following:
+
+
+cluster b370a29d-9287-4ca3-ab57-3d824f65e339
+ health HEALTH_OK
+ monmap e1: 1 mons at {ceph1=10.0.0.8:6789/0}, election epoch 2, quorum 0 ceph1
+ osdmap e63: 2 osds: 2 up, 2 in
+ pgmap v41332: 952 pgs, 20 pools, 17130 MB data, 2199 objects
+ 115 GB used, 167 GB / 297 GB avail
+ 1 active+clean+scrubbing+deep
+ 951 active+clean
+
+
+ Checking OSD Status
+
+
+ You can check OSDs to ensure they are up and in by executing:
+
+
+ceph osd stat
+
+
+ or
+
+
+ceph osd dump
+
+
+ You can also view OSDs according to their position in the CRUSH map.
+
+
+ceph osd tree
+
+
+ Ceph will print out a CRUSH tree with a host, its OSDs, whether they are
+ up and their weight.
+
+
+# id weight type name up/down reweight
+-1 3 pool default
+-3 3 rack mainrack
+-2 3 host osd-host
+0 1 osd.0 up 1
+1 1 osd.1 up 1
+2 1 osd.2 up 1
+
+
+ Checking Monitor Status
+
+
+ If your cluster has multiple monitors (likely), you should check the monitor
+ quorum status after you start the cluster before reading and/or writing
+ data. A quorum must be present when multiple monitors are running. You
+ should also check monitor status periodically to ensure that they are
+ running.
+
+
+
+ To display the monitor map, execute the following:
+
+
+ceph mon stat
+
+
+ or
+
+
+ceph mon dump
+
+
+ To check the quorum status for the monitor cluster, execute the following:
+
+
+ceph quorum_status
+
+
+ Ceph will return the quorum status. For example, a Ceph cluster
+ consisting of three monitors may return the following:
+
+
+{ "election_epoch": 10,
+ "quorum": [
+ 0,
+ 1,
+ 2],
+ "monmap": { "epoch": 1,
+ "fsid": "444b489c-4f16-4b75-83f0-cb8097468898",
+ "modified": "2011-12-12 13:28:27.505520",
+ "created": "2011-12-12 13:28:27.505520",
+ "mons": [
+ { "rank": 0,
+ "name": "a",
+ "addr": "127.0.0.1:6789\/0"},
+ { "rank": 1,
+ "name": "b",
+ "addr": "127.0.0.1:6790\/0"},
+ { "rank": 2,
+ "name": "c",
+ "addr": "127.0.0.1:6791\/0"}
+ ]
+ }
+}
+
+
+
+ Checking Placement Group States
+
+
+ Placement groups map objects to OSDs. When you monitor your placement
+ groups, you will want them to be active and
+ clean. For a detailed discussion, refer to
+ Monitoring
+ OSDs and Placement Groups.
+
+
+
+ Using the Admin Socket
+
+
+ The Ceph admin socket allows you to query a daemon via a socket interface.
+ By default, Ceph sockets reside under /var/run/ceph.
+ To access a daemon via the admin socket, log in to the host running the
+ daemon and use the following command:
+
+
+ceph --admin-daemon /var/run/ceph/socket-name
+
+
+ To view the available admin socket commands, execute the following command:
+
+
+ceph --admin-daemon /var/run/ceph/socket-name help
+
+
+ The admin socket command enables you to show and set your configuration at
+ runtime. Refer to Viewing
+ a Configuration at Runtime for details.
+
+
+
+ Additionally, you can set configuration values at runtime directly (the
+ admin socket bypasses the monitor, unlike ceph tell
+ daemon-type.id
+ injectargs, which relies on the monitor but does not require you to log in
+ directly to the host in question).
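+
+ As an illustrative sketch (the socket name follows the usual
+ cluster-daemon.id.asok naming convention; adjust the path and the setting
+ to your environment), you could inspect and change a setting of osd.1 on
+ the host where it runs:
+
+sudo ceph --admin-daemon /var/run/ceph/ceph-osd.1.asok config get debug_osd
+sudo ceph --admin-daemon /var/run/ceph/ceph-osd.1.asok config set debug_osd 5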
+
+
+
+
+ Authentication with cephx
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ To identify clients and protect against man-in-the-middle attacks, Ceph
+ provides its cephx authentication system. Clients in
+ this context are either human users—such as the admin user—or
+ Ceph-related services/daemons, for example OSDs, monitors, or RADOS Gateways.
+
+
+
+ The cephx protocol does not address data encryption in transport, such as
+ TLS/SSL.
+
+
+
+
+
+ Authentication Architecture
+
+
+ cephx uses shared secret keys for authentication, meaning both the client
+ and the monitor cluster have a copy of the client’s secret key. The
+ authentication protocol enables both parties to prove to each other that
+ they have a copy of the key without actually revealing it. This provides
+ mutual authentication, which means the cluster is sure the user possesses
+ the secret key, and the user is sure that the cluster has a copy of the
+ secret key as well.
+
+
+
+ A key scalability feature of Ceph is to avoid a centralized interface to
+ the Ceph object store. This means that Ceph clients can interact with
+ OSDs directly. To protect data, Ceph provides its cephx authentication
+ system, which authenticates Ceph clients.
+
+
+
+ Each monitor can authenticate clients and distribute keys, so there is no
+ single point of failure or bottleneck when using cephx. The monitor
+ returns an authentication data structure that contains a session key for use
+ in obtaining Ceph services. This session key is itself encrypted with the
+ client’s permanent secret key, so that only the client can request
+ services from the Ceph monitors. The client then uses the session key to
+ request its desired services from the monitor, and the monitor provides the
+ client with a ticket that will authenticate the client to the OSDs that
+ actually handle data. Ceph monitors and OSDs share a secret, so the client
+ can use the ticket provided by the monitor with any OSD or metadata server
+ in the cluster. cephx tickets expire, so an attacker cannot use an expired
+ ticket or session key obtained wrongfully. This form of authentication will
+ prevent attackers with access to the communications medium from either
+ creating bogus messages under another client’s identity or altering
+ another client’s legitimate messages, as long as the client secret key is
+ not revealed before it expires.
+
+
+
+ To use cephx, an administrator must set up clients/users first. In the
+ following diagram, the
+ client.admin user invokes
+ ceph auth get-or-create-key from the command line to
+ generate a user name and secret key. Ceph’s auth
+ subsystem generates the user name and key, stores a copy with the monitor(s)
+ and transmits the user’s secret back to the
+ client.admin user. This means that
+ the client and the monitor share a secret key.
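+
+ As a minimal sketch (the client.example user name and its capabilities are
+ hypothetical), such a call could look like this; it prints only the
+ generated secret key:
+
+ceph auth get-or-create-key client.example mon 'allow r' osd 'allow rw pool=rbd'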
+
+
+
+
+
+ To authenticate with the monitor, the client passes the user name to the
+ monitor. The monitor generates a session key and encrypts it with the secret
+ key associated with the user name and transmits the encrypted ticket back to
+ the client. The client then decrypts the data with the shared secret key to
+ retrieve the session key. The session key identifies the user for the
+ current session. The client then requests a ticket related to the user,
+ which is signed by the session key. The monitor generates a ticket, encrypts
+ it with the user’s secret key and transmits it back to the client. The
+ client decrypts the ticket and uses it to sign requests to OSDs and metadata
+ servers throughout the cluster.
+
+
+
+
+
+ The cephx protocol authenticates ongoing communications between the client
+ machine and the Ceph servers. Each message sent between a client and a
+ server after the initial authentication is signed using a ticket that the
+ monitors, OSDs, and metadata servers can verify with their shared secret.
+
+
+
+
+
+
+ The protection offered by this authentication is between the Ceph client
+ and the Ceph cluster hosts. The authentication is not extended beyond the
+ Ceph client. If the user accesses the Ceph client from a remote host,
+ Ceph authentication is not applied to the connection between the user’s
+ host and the client host.
+
+
+
+
+
+
+ Key Management
+
+
+ This section describes Ceph client users and their authentication and
+ authorization with the Ceph storage cluster. Users
+ are either individuals or system actors such as applications, which use
+ Ceph clients to interact with the Ceph storage cluster daemons.
+
+
+
+ When Ceph runs with authentication and authorization enabled (enabled by
+ default), you must specify a user name and a keyring containing the secret
+ key of the specified user (usually via the command line). If you do not
+ specify a user name, Ceph will use
+ client.admin as the default user
+ name. If you do not specify a keyring, Ceph will look for a keyring via
+ the keyring setting in the Ceph configuration file. For example, if you
+ execute the ceph health command without specifying a user
+ name or keyring, Ceph interprets the command like this:
+
+
+ceph -n client.admin --keyring=/etc/ceph/ceph.client.admin.keyring health
+
+
+ Alternatively, you may use the CEPH_ARGS environment
+ variable to avoid re-entering the user name and secret.
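+
+ For example, assuming the default admin keyring location, you could set the
+ variable once per shell session and then run ceph commands without
+ repeating the options:
+
+export CEPH_ARGS="-n client.admin --keyring=/etc/ceph/ceph.client.admin.keyring"
+ceph health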
+
+
+
+ Background Information
+
+ Regardless of the type of Ceph client (for example, block device, object
+ storage, file system, native API), Ceph stores all data as objects within
+ pools. Ceph users need to have access to pools in
+ order to read and write data. Additionally, Ceph users must have execute
+ permissions to use Ceph's administrative commands. The following concepts
+ will help you understand Ceph user management.
+
+
+ User
+
+ A user is either an individual or a system actor such as an application.
+ Creating users allows you to control who (or what) can access your Ceph
+ storage cluster, its pools, and the data within pools.
+
+
+ Ceph uses types of users. For the purposes of user
+ management, the type will always be client. Ceph
+ identifies users in period (.) delimited form, consisting of the user type
+ and the user ID. For example, TYPE.ID,
+ client.admin, or client.user1. The
+ reason for user typing is that Ceph monitors, OSDs, and metadata servers
+ also use the cephx protocol, but they are not clients. Distinguishing the
+ user type helps to distinguish between client users and other users,
+ streamlining access control, user monitoring, and traceability.
+
+
+
+ A Ceph storage cluster user is not the same as a Ceph object storage
+ user or a Ceph file system user. The Ceph RADOS Gateway uses a Ceph storage
+ cluster user to communicate between the gateway daemon and the storage
+ cluster, but the gateway has its own user management functionality for
+ end users. The Ceph file system uses POSIX semantics. The user space
+ associated with it is not the same as a Ceph storage cluster user.
+
+
+
+
+ Authorization and Capabilities
+
+ Ceph uses the term 'capabilities' (caps) to describe authorizing an
+ authenticated user to exercise the functionality of the monitors, OSDs,
+ and metadata servers. Capabilities can also restrict access to data within
+ a pool or a namespace within a pool. A Ceph administrative user sets a
+ user's capabilities when creating or updating a user.
+
+
+ Capability syntax follows the form:
+
+daemon-type 'allow capability' [...]
+
+ Following is a list of capabilities for each service type:
+
+
+
+ Monitor capabilities
+
+
+ include r, w,
+ x and allow profile
+ cap.
+
+mon 'allow rwx'
+mon 'allow profile osd'
+
+
+
+ OSD capabilities
+
+
+ include r, w,
+ x, class-read,
+ class-write and profile osd.
+ Additionally, OSD capabilities also allow for pool and namespace
+ settings.
+
+osd 'allow capability' [pool=poolname] [namespace=namespace-name]
+
+
+
+ MDS capability
+
+
+ simply requires allow, or blank.
+
+mds 'allow'
+
+
+
+
+ The following entries describe each capability:
+
+
+
+ allow
+
+
+ Precedes access settings for a daemon. Implies rw
+ for MDS only.
+
+
+
+
+ r
+
+
+ Gives the user read access. Required with monitors to retrieve the
+ CRUSH map.
+
+
+
+
+ w
+
+
+ Gives the user write access to objects.
+
+
+
+
+ x
+
+
+ Gives the user the capability to call class methods (both read and
+ write) and to conduct auth operations on monitors.
+
+
+
+
+ class-read
+
+
+ Gives the user the capability to call class read methods. Subset of
+ x.
+
+
+
+
+ class-write
+
+
+ Gives the user the capability to call class write methods. Subset of
+ x.
+
+
+
+
+ *
+
+
+ Gives the user read, write, and execute permissions for a particular
+ daemon/pool, and the ability to execute admin commands.
+
+
+
+
+ profile osd
+
+
+ Gives a user permissions to connect as an OSD to other OSDs or
+ monitors. Conferred on OSDs to enable OSDs to handle replication
+ heartbeat traffic and status reporting.
+
+
+
+
+ profile mds
+
+
+ Gives a user permissions to connect as an MDS to other MDSs or
+ monitors.
+
+
+
+
+ profile bootstrap-osd
+
+
+ Gives a user permissions to bootstrap an OSD. Delegated to deployment
+ tools such as ceph-disk,
+ ceph-deploy so that they have permissions to add
+ keys when bootstrapping an OSD.
+
+
+
+
+ profile bootstrap-mds
+
+
+ Gives a user permissions to bootstrap a metadata server. Delegated to
+ deployment tools such as ceph-deploy so they have permissions to add
+ keys when bootstrapping a metadata server.
+
+
+
+
+
+
+ Pools
+
+ A pool is a logical partition where users store data. In Ceph
+ deployments, it is common to create a pool as a logical partition for
+ similar types of data. For example, when deploying Ceph as a back-end
+ for OpenStack, a typical deployment would have pools for volumes, images,
+ backups and virtual machines, and users such as
+ client.glance or
+ client.cinder.
+
+
+
+
+
+ Managing Users
+
+ User management functionality provides Ceph cluster administrators with
+ the ability to create, update, and delete users directly in the Ceph
+ cluster.
+
+
+ When you create or delete users in the Ceph cluster, you may need to
+ distribute keys to clients so that they can be added to keyrings. See
+ for details.
+
+
+ Listing Users
+
+ To list the users in your cluster, execute the following:
+
+ceph auth list
+
+ Ceph will list all users in your cluster. For example, in a cluster with
+ two nodes, ceph auth list output looks similar to this:
+
+installed auth entries:
+
+osd.0
+ key: AQCvCbtToC6MDhAATtuT70Sl+DymPCfDSsyV4w==
+ caps: [mon] allow profile osd
+ caps: [osd] allow *
+osd.1
+ key: AQC4CbtTCFJBChAAVq5spj0ff4eHZICxIOVZeA==
+ caps: [mon] allow profile osd
+ caps: [osd] allow *
+client.admin
+ key: AQBHCbtT6APDHhAA5W00cBchwkQjh3dkKsyPjw==
+ caps: [mds] allow
+ caps: [mon] allow *
+ caps: [osd] allow *
+client.bootstrap-mds
+ key: AQBICbtTOK9uGBAAdbe5zcIGHZL3T/u2g6EBww==
+ caps: [mon] allow profile bootstrap-mds
+client.bootstrap-osd
+ key: AQBHCbtT4GxqORAADE5u7RkpCN/oo4e5W0uBtw==
+ caps: [mon] allow profile bootstrap-osd
+
+ TYPE.ID Notation
+
+ Note that the TYPE.ID notation for users applies such
+ that osd.0 specifies a user of type
+ osd and its ID is 0.
+ client.admin is a user of type
+ client and its ID is admin. Note
+ also that each entry has a key:
+ value entry, and one or more
+ caps: entries.
+
+
+ You may use the -o
+ option with ceph auth list to save the output to a
+ file.
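+
+ For example (the output file name is arbitrary):
+
+ceph auth list -o auth-list.txt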
+
+
+
+
+ Getting Information about Users
+
+ To retrieve a specific user, key, and capabilities, execute the following:
+
+ceph auth get TYPE.ID
+
+ For example:
+
+ceph auth get client.admin
+exported keyring for client.admin
+[client.admin]
+ key = AQA19uZUqIwkHxAAFuUwvq0eJD4S173oFRxe0g==
+ caps mds = "allow"
+ caps mon = "allow *"
+ caps osd = "allow *"
+
+ Developers may also execute the following:
+
+ceph auth export TYPE.ID
+
+ The auth export command is identical to auth
+ get, but also prints the internal authentication ID.
+
+
+
+ Adding Users
+
+ Adding a user creates a user name (TYPE.ID), a secret
+ key, and any capabilities included in the command you use to create the
+ user.
+
+
+ A user's key enables the user to authenticate with the Ceph storage
+ cluster. The user's capabilities authorize the user to read, write, or
+ execute on Ceph monitors (mon), Ceph OSDs (osd), or Ceph metadata
+ servers (mds).
+
+
+ There are a few commands available to add a user:
+
+
+
+ ceph auth add
+
+
+
+ This command is the canonical way to add a user. It will create the
+ user, generate a key, and add any specified capabilities.
+
+
+
+
+ ceph auth get-or-create
+
+
+
+ This command is often the most convenient way to create a user, because
+ it returns a keyfile format with the user name (in brackets) and the
+ key. If the user already exists, this command simply returns the user
+ name and key in the keyfile format. You may use the -o option to save the output
+ to a file.
+
+
+
+
+ ceph auth get-or-create-key
+
+
+
+ This command is a convenient way to create a user and return the user's
+ key (only). This is useful for clients that need the key only (for
+ example libvirt). If the user already exists, this command simply
+ returns the key. You may use the -o option to save the output
+ to a file.
+
+
+
+
+
+ When creating client users, you may create a user with no capabilities. A
+ user with no capabilities can authenticate but nothing more. Such a client
+ cannot retrieve the cluster map from the monitor. However, you can create
+ a user with no capabilities if you want to defer adding capabilities later
+ using the ceph auth caps command.
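+
+ A minimal sketch of this workflow (client.pending is a hypothetical user
+ name) could look like this:
+
+ceph auth add client.pending
+ceph auth caps client.pending mon 'allow r' osd 'allow rw pool=liverpool'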
+
+
+ A typical user has at least read capabilities on the Ceph monitor and
+ read and write capabilities on Ceph OSDs. Additionally, a user's OSD
+ permissions are often restricted to accessing a particular pool.
+
+ceph auth add client.john mon 'allow r' osd \
+ 'allow rw pool=liverpool'
+ceph auth get-or-create client.paul mon 'allow r' osd \
+ 'allow rw pool=liverpool'
+ceph auth get-or-create client.george mon 'allow r' osd \
+ 'allow rw pool=liverpool' -o george.keyring
+ceph auth get-or-create-key client.ringo mon 'allow r' osd \
+ 'allow rw pool=liverpool' -o ringo.key
+
+
+ If you provide a user with capabilities to OSDs, but you do
+ not restrict access to particular pools, the user will have
+ access to all pools in the cluster.
+
+
+
+
+ Modifying User Capabilities
+
+ The ceph auth caps command allows you to specify a user
+ and change the user's capabilities. Setting new capabilities will
+ overwrite current ones. To view current capabilities run ceph
+ auth get
+ USERTYPE.USERID.
+ To add capabilities, you also need to specify the existing capabilities
+ when using the following form:
+
+ceph auth caps USERTYPE.USERID daemon 'allow [r|w|x|*|...] \
+ [pool=pool-name] [namespace=namespace-name]' [daemon 'allow [r|w|x|*|...] \
+ [pool=pool-name] [namespace=namespace-name]']
+
+ For example:
+
+ceph auth get client.john
+ceph auth caps client.john mon 'allow r' osd 'allow rw pool=prague'
+ceph auth caps client.paul mon 'allow rw' osd 'allow rwx pool=prague'
+ceph auth caps client.brian-manager mon 'allow *' osd 'allow *'
+
+ To remove a capability, you may reset the capability. If you want the user
+ to have no access to a particular daemon that was previously set, specify
+ an empty string:
+
+ceph auth caps client.ringo mon ' ' osd ' '
+
+
+ Deleting Users
+
+ To delete a user, use ceph auth del:
+
+ceph auth del TYPE.ID
+
+ where TYPE is one of client,
+ osd, mon, or mds,
+ and ID is the user name or ID of the daemon.
+
+
+
+ Printing a User's Key
+
+ To print a user’s authentication key to standard output, execute the
+ following:
+
+ceph auth print-key TYPE.ID
+
+ where TYPE is one of client,
+ osd, mon, or mds,
+ and ID is the user name or ID of the daemon.
+
+
+ Printing a user's key is useful when you need to populate client software
+ with a user's key (such as libvirt), as in the following example:
+
+mount -t ceph host:/ mount_point \
+-o name=client.user,secret=`ceph auth print-key client.user`
+
+
+ Importing Users
+
+ To import one or more users, use ceph auth import and
+ specify a keyring:
+
+sudo ceph auth import -i /etc/ceph/ceph.keyring
+
+
+ The Ceph storage cluster will add new users, their keys and their
+ capabilities and will update existing users, their keys and their
+ capabilities.
+
+
+
+
+
+
+ Keyring Management
+
+ When you access Ceph via a Ceph client, the client will look for a
+ local keyring. Ceph presets the keyring setting with the following four
+ keyring names by default so you do not need to set them in your Ceph
+ configuration file unless you want to override the defaults:
+
+/etc/ceph/cluster.name.keyring
+/etc/ceph/cluster.keyring
+/etc/ceph/keyring
+/etc/ceph/keyring.bin
+
+ The cluster metavariable is your Ceph cluster
+ name as defined by the name of the Ceph configuration file.
+ ceph.conf means that the cluster name is
+ ceph, thus ceph.keyring. The
+ name metavariable is the user type and user ID,
+ for example client.admin, thus
+ ceph.client.admin.keyring.
+
+
+ After you create a user (for example
+ client.ringo), you must get the
+ key and add it to a keyring on a Ceph client so that the user can access
+ the Ceph storage cluster.
+
+
+ details how to list, get, add,
+ modify and delete users directly in the Ceph storage cluster. However,
+ Ceph also provides the ceph-authtool utility to allow
+ you to manage keyrings from a Ceph client.
+
+
+ Creating a Keyring
+
+ When you use the procedures in to
+ create users, you need to provide user keys to the Ceph client(s) so
+ that the client can retrieve the key for the specified user and
+ authenticate with the Ceph storage cluster. Ceph clients access
+ keyrings to look up a user name and retrieve the user's key:
+
+sudo ceph-authtool --create-keyring /path/to/keyring
+
+ When creating a keyring with multiple users, we recommend using the
+ cluster name (for example cluster.keyring) for
+ the keyring file name and saving it in the /etc/ceph
+ directory so that the keyring configuration default setting will pick up
+ the file name without requiring you to specify it in the local copy of
+ your Ceph configuration file. For example, create
+ ceph.keyring by executing the following:
+
+sudo ceph-authtool -C /etc/ceph/ceph.keyring
+
+ When creating a keyring with a single user, we recommend using the cluster
+ name, the user type and the user name and saving it in the
+ /etc/ceph directory. For example,
+ ceph.client.admin.keyring for the
+ client.admin user.
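+
+ For example, the following illustrative command creates an empty keyring
+ file for the client.admin user:
+
+sudo ceph-authtool -C /etc/ceph/ceph.client.admin.keyring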
+
+
+
+ Adding a User to a Keyring
+
+ When you add a user to the Ceph storage cluster (see
+ ), you can
+ retrieve the user, key and capabilities, and save the user to a keyring.
+
+
+ If you only want to use one user per keyring, the ceph auth
+ get command with the option will save the
+ output in the keyring file format. For example, to create a keyring for
+ the client.admin user, execute
+ the following:
+
+ceph auth get client.admin -o /etc/ceph/ceph.client.admin.keyring
+
+ When you want to import users to a keyring, you can use
+ ceph-authtool to specify the destination keyring and
+ the source keyring:
+
+sudo ceph-authtool /etc/ceph/ceph.keyring \
+ --import-keyring /etc/ceph/ceph.client.admin.keyring
+
+
+ Creating a User
+
+ Ceph provides the ceph auth add command to create a
+ user directly in the Ceph storage cluster. However, you can also create
+ a user, keys and capabilities directly on a Ceph client keyring. Then,
+ you can import the user to the Ceph storage cluster:
+
+sudo ceph-authtool -n client.ringo --cap osd 'allow rwx' \
+ --cap mon 'allow rwx' /etc/ceph/ceph.keyring
+
+ You can also create a keyring and add a new user to the keyring
+ simultaneously:
+
+sudo ceph-authtool -C /etc/ceph/ceph.keyring -n client.ringo \
+ --cap osd 'allow rwx' --cap mon 'allow rwx' --gen-key
+
+ In the previous scenarios, the new user
+ client.ringo is only in the
+ keyring. To add the new user to the Ceph storage cluster, you must still
+ add the new user to the cluster:
+
+sudo ceph auth add client.ringo -i /etc/ceph/ceph.keyring
+
+
+ Modifying Users
+
+ To modify the capabilities of a user record in a keyring, specify the
+ keyring and the user followed by the capabilities:
+
+sudo ceph-authtool /etc/ceph/ceph.keyring -n client.ringo \
+ --cap osd 'allow rwx' --cap mon 'allow rwx'
+
+ To update the modified user within the Ceph cluster environment, you
+ must import the changes from the keyring to the user entry in the Ceph
+ cluster:
+
+ceph auth import -i /etc/ceph/ceph.keyring
+
+ See for details
+ on updating a Ceph storage cluster user from a keyring.
+
+
+
+
+
+ Command Line Usage
+
+ The ceph command supports the following options related
+ to the user name and secret manipulation:
+
+
+
+ --id or --user
+
+
+
+ Ceph identifies users with a type and an ID
+ (TYPE.ID, such as
+ client.admin or
+ client.user1). The
+ --id and --user
+ options enable you to specify the ID portion of the user name (for
+ example admin or
+ user1). You can specify the
+ user with the --id and omit the type. For example, to specify user
+ client.foo enter the following:
+
+ceph --id foo --keyring /path/to/keyring health
+ceph --user foo --keyring /path/to/keyring health
+
+
+
+ --name or -n
+
+
+
+ Ceph identifies users with a type and an ID
+ (TYPE.ID, such as
+ client.admin or
+ client.user1). The
+ --name and -n options enable you to
+ specify the fully qualified user name. You must specify the user type
+ (typically client) with the user ID:
+
+ceph --name client.foo --keyring /path/to/keyring health
+ceph -n client.foo --keyring /path/to/keyring health
+
+
+
+ --keyring
+
+
+
+ The path to the keyring containing one or more user name and secret. The
+ option provides the same functionality, but it
+ does not work with RADOS Gateway, which uses for
+ another purpose. You may retrieve a keyring with ceph auth
+ get-or-create and store it locally. This is a preferred
+ approach, because you can switch user names without switching the
+ keyring path:
+
+sudo rbd map --id foo --keyring /path/to/keyring mypool/myimage
+
+
+
+
+
+
+
+ Stored Data Management
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ The CRUSH algorithm determines how to store and retrieve data by computing
+ data storage locations. CRUSH empowers Ceph clients to communicate with
+ OSDs directly rather than through a centralized server or broker. With an
+ algorithmically determined method of storing and retrieving data, Ceph
+ avoids a single point of failure, a performance bottleneck, and a physical
+ limit to its scalability.
+
+
+ CRUSH requires a map of your cluster, and uses the CRUSH map to
+ pseudo-randomly store and retrieve data in OSDs with a uniform distribution
+ of data across the cluster.
+
+
+ CRUSH maps contain a list of OSDs, a list of ‘buckets’ for aggregating
+ the devices into physical locations, and a list of rules that tell CRUSH how
+ it should replicate data in a Ceph cluster’s pools. By reflecting the
+ underlying physical organization of the installation, CRUSH can model—and
+ thereby address—potential sources of correlated device failures. Typical
+ sources include physical proximity, a shared power source, and a shared
+ network. By encoding this information into the cluster map, CRUSH placement
+ policies can separate object replicas across different failure domains while
+ still maintaining the desired distribution. For example, to address the
+ possibility of concurrent failures, it may be desirable to ensure that data
+ replicas are on devices using different shelves, racks, power supplies,
+ controllers, and/or physical locations.
+
+
+ When you create a configuration file and deploy Ceph with
+ ceph-deploy, Ceph generates a default CRUSH map for your
+ configuration. The default CRUSH map is fine for your Ceph sandbox
+ environment. However, when you deploy a large-scale data cluster, you should
+ give significant consideration to developing a custom CRUSH map, because it
+ will help you manage your Ceph cluster, improve performance and ensure data
+ safety.
+
+
+ For example, if an OSD goes down, a CRUSH map can help you locate the
+ physical data center, room, row and rack of the host with the failed OSD in
+ the event you need to use on-site support or replace hardware.
+
+
+ Similarly, CRUSH may help you identify faults more quickly. For example, if
+ all OSDs in a particular rack go down simultaneously, the fault may lie with
+ a network switch or power to the rack or the network switch rather than the
+ OSDs themselves.
+
+
+ A custom CRUSH map can also help you identify the physical locations where
+ Ceph stores redundant copies of data when the placement group(s) associated
+ with a failed host are in a degraded state.
+
+
+ There are three main sections to a CRUSH Map.
+
+
+
+
+ Devices consist of any
+ object storage device, that is, the hard disk corresponding to a
+ ceph-osd daemon.
+
+
+
+
+ Buckets consist of a
+ hierarchical aggregation of storage locations (for example rows, racks,
+ hosts, etc.) and their assigned weights.
+
+
+
+
+ Rules consist of the
+ manner of selecting buckets.
+
+
+
+
+ Devices
+
+
+ To map placement groups to OSDs, a CRUSH Map requires a list of OSD devices
+ (the name of the OSD daemon). The list of devices appears first in the
+ CRUSH Map.
+
+
+#devices
+device num osd.name
+
+
+ For example:
+
+
+#devices
+device 0 osd.0
+device 1 osd.1
+device 2 osd.2
+device 3 osd.3
+
+
+ As a general rule, an OSD daemon maps to a single disk.
+
+
+
+ Buckets
+
+
+ CRUSH maps contain a list of OSDs, which can be organized into 'buckets' for
+ aggregating the devices into physical locations.
+
+
+
+ The following bucket types are available (type number, type name, and
+ description):
+
+  0  OSD          An OSD daemon (osd.1, osd.2, and so on).
+  1  Host         A host name containing one or more OSDs.
+  2  Chassis      Chassis of which the rack is composed.
+  3  Rack         A computer rack. The default is unknownrack.
+  4  Row          A row in a series of racks.
+  5  Pdu          Power distribution unit.
+  6  Pod
+  7  Room         A room containing racks and rows of hosts.
+  8  Data Center  A physical data center containing rooms.
+  9  Region
+ 10  Root
+
+
+ You can remove these types and create your own bucket types.
+
+
+
+
+ Ceph’s deployment tools generate a CRUSH map that contains a bucket for
+ each host, and a pool named 'default', which is useful for the default
+ rbd pool. The remaining bucket types provide a means for
+ storing information about the physical location of nodes/buckets, which
+ makes cluster administration much easier when OSDs, hosts, or network
+ hardware malfunction and the administrator needs access to physical
+ hardware.
+
+
+
+ A bucket has a type, a unique name (string), a unique ID expressed as a
+ negative integer, a weight relative to the total capacity/capability of its
+ item(s), the bucket algorithm ( straw by default), and
+ the hash (0 by default, reflecting CRUSH Hash
+ rjenkins1). A bucket may have one or more items. The
+ items may consist of other buckets or OSDs. Items may have a weight that
+ reflects the relative weight of the item.
+
+
+[bucket-type] [bucket-name] {
+ id [a unique negative numeric ID]
+ weight [the relative capacity/capability of the item(s)]
+ alg [the bucket type: uniform | list | tree | straw ]
+ hash [the hash type: 0 by default]
+ item [item-name] weight [weight]
+}
+
+
+ The following example illustrates how you can use buckets to aggregate a
+ pool and physical locations like a data center, a room, a rack and a row.
+
+
+host ceph-osd-server-1 {
+ id -17
+ alg straw
+ hash 0
+ item osd.0 weight 1.00
+ item osd.1 weight 1.00
+}
+
+row rack-1-row-1 {
+ id -16
+ alg straw
+ hash 0
+ item ceph-osd-server-1 weight 2.00
+}
+
+rack rack-3 {
+ id -15
+ alg straw
+ hash 0
+ item rack-3-row-1 weight 2.00
+ item rack-3-row-2 weight 2.00
+ item rack-3-row-3 weight 2.00
+ item rack-3-row-4 weight 2.00
+ item rack-3-row-5 weight 2.00
+}
+
+rack rack-2 {
+ id -14
+ alg straw
+ hash 0
+ item rack-2-row-1 weight 2.00
+ item rack-2-row-2 weight 2.00
+ item rack-2-row-3 weight 2.00
+ item rack-2-row-4 weight 2.00
+ item rack-2-row-5 weight 2.00
+}
+
+rack rack-1 {
+ id -13
+ alg straw
+ hash 0
+ item rack-1-row-1 weight 2.00
+ item rack-1-row-2 weight 2.00
+ item rack-1-row-3 weight 2.00
+ item rack-1-row-4 weight 2.00
+ item rack-1-row-5 weight 2.00
+}
+
+room server-room-1 {
+ id -12
+ alg straw
+ hash 0
+ item rack-1 weight 10.00
+ item rack-2 weight 10.00
+ item rack-3 weight 10.00
+}
+
+datacenter dc-1 {
+ id -11
+ alg straw
+ hash 0
+ item server-room-1 weight 30.00
+ item server-room-2 weight 30.00
+}
+
+pool data {
+ id -10
+ alg straw
+ hash 0
+ item dc-1 weight 60.00
+ item dc-2 weight 60.00
+}
+
+
+ Rule Sets
+
+
+ CRUSH maps support the notion of 'CRUSH rules', which are the rules that
+ determine data placement for a pool. For large clusters, you will likely
+ create many pools where each pool may have its own CRUSH ruleset and rules.
+ The default CRUSH map has a rule for each pool, and one ruleset assigned to
+ each of the default pools.
+
+
+
+
+ In most cases, you will not need to modify the default rules. When you
+ create a new pool, its default ruleset is 0.
+
+
+
+
+ A rule takes the following form:
+
+
+rule rulename {
+
+ ruleset ruleset
+ type type
+ min_size min-size
+ max_size max-size
+ step step
+
+}
+
+
+
+ ruleset
+
+
+ An integer. Classifies a rule as belonging to a set of rules. Activated
+ by setting the ruleset in a pool. This option is required. Default is
+ 0.
+
+
+
+ You need to increase the ruleset number from the default 0 continuously,
+ otherwise the related monitor may crash.
+
+
+
+
+
+ type
+
+
+ A string. Describes a rule for either a hard disk (replicated) or a RAID.
+ This option is required. Default is replicated.
+
+
+
+
+ min_size
+
+
+ An integer. If a placement group makes fewer replicas than this number,
+ CRUSH will NOT select this rule. This option is required. Default is
+ 2.
+
+
+
+
+ max_size
+
+
+ An integer. If a placement group makes more replicas than this number,
+ CRUSH will NOT select this rule. This option is required. Default is
+ 10.
+
+
+
+
+ step take bucket
+
+
+
+ Takes a bucket name, and begins iterating down the tree. This option is
+ required.
+
+
+
+
+ step choose firstn num type bucket-type
+
+
+
+ Selects the number of buckets of the given type, where N is the number of
+ options available: if num > 0 and num < N, choose that many buckets; if
+ num < 0, it means N - num; and if num == 0, choose N buckets (all
+ available).
+ Follows step take or step choose.
+
+
+
+
+ step emit
+
+
+ Outputs the current value and empties the stack. Typically used at the
+ end of a rule, but may also be used to form different trees in the same
+ rule. Follows step choose.
+
+
+
+
+
+
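+
+ The following sketch shows what a complete rule could look like. It is an
+ illustration only: the rule name, the ruleset number, and the ssd root are
+ assumptions, and it uses step chooseleaf, a variant of step choose that
+ descends to individual OSDs below the chosen bucket type:
+
+rule ssd_replicated_example {
+        ruleset 1
+        type replicated
+        min_size 2
+        max_size 10
+        step take ssd
+        step chooseleaf firstn 0 type host
+        step emit
+}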
+
+ To activate one or more rules with a common ruleset number to a pool, set
+ the ruleset number to the pool.
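+
+ For example, assuming a pool named mypool and a ruleset number of 1:
+
+ceph osd pool set mypool crush_ruleset 1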
+
+
+
+
+ CRUSH Map Manipulation
+
+
+ This section introduces basic ways to manipulate the CRUSH Map, such as
+ editing a CRUSH Map, changing CRUSH Map parameters, and
+ adding/moving/removing an OSD.
+
+
+
+ Editing a CRUSH Map
+
+ To edit an existing CRUSH map, do the following:
+
+
+
+
+ Get a CRUSH Map. To get the CRUSH Map for your cluster, execute the
+ following:
+
+ceph osd getcrushmap -o compiled-crushmap-filename
+
+ Ceph will output (-o) a compiled CRUSH Map to the
+ file name you specified. Since the CRUSH Map is in a compiled form, you
+ must decompile it first before you can edit it.
+
+
+
+
+ Decompile a CRUSH Map. To decompile a CRUSH Map, execute the following:
+
+crushtool -d compiled-crushmap-filename \
+ -o decompiled-crushmap-filename
+
+ Ceph will decompile (-d) the compiled CRUSH map and
+ output (-o) it to the file name you specified.
+
+
+
+
+ Edit at least one of Devices, Buckets and Rules parameters.
+
+
+
+
+ Compile a CRUSH Map. To compile a CRUSH Map, execute the following:
+
+crushtool -c decompiled-crush-map-filename \
+ -o compiled-crush-map-filename
+
+ Ceph will store a compiled CRUSH map to the file name you specified.
+
+
+
+
+ Set a CRUSH Map. To set the CRUSH Map for your cluster, execute the
+ following:
+
+ceph osd setcrushmap -i compiled-crushmap-filename
+
+ Ceph will input the compiled CRUSH Map of the file name you specified
+ as the CRUSH Map for the cluster.
+
+
+
+
+
+
+ Add/Move an OSD
+
+ To add or move an OSD in the CRUSH map of a running cluster, execute the
+ following:
+
+ceph osd crush set id_or_name weight root=pool-name \
+ bucket-type=bucket-name ...
+
+
+ id
+
+
+ An integer. The numeric ID of the OSD. This option is required.
+
+
+
+
+ name
+
+
+ A string. The full name of the OSD. This option is required.
+
+
+
+
+ weight
+
+
+ A double. The CRUSH weight for the OSD. This option is required.
+
+
+
+
+ pool
+
+
+ A key/value pair. By default, the CRUSH hierarchy contains the pool
+ default as its root. This option is required.
+
+
+
+
+ bucket-type
+
+
+ Key/value pairs. You may specify the OSD’s location in the CRUSH
+ hierarchy.
+
+
+
+
+
+ The following example adds osd.0 to the hierarchy, or
+ moves the OSD from a previous location.
+
+ceph osd crush set osd.0 1.0 root=data datacenter=dc1 room=room1 \
+row=foo rack=bar host=foo-bar-1
+
+
+
+ Adjust an OSD’s CRUSH Weight
+
+ To adjust an OSD’s crush weight in the CRUSH map of a running cluster,
+ execute the following:
+
+ceph osd crush reweight name weight
+
+
+ name
+
+
+ A string. The full name of the OSD. This option is required.
+
+
+
+
+ weight
+
+
+ A double. The CRUSH weight for the OSD. This option is required.
+
+
+
+
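+
+ For example, to set the CRUSH weight of osd.0 to 1.5 (an arbitrary
+ illustrative value):
+
+ceph osd crush reweight osd.0 1.5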
+
+
+
+ Remove an OSD
+
+ To remove an OSD from the CRUSH map of a running cluster, execute the
+ following:
+
+ceph osd crush remove name
+
+
+ name
+
+
+ A string. The full name of the OSD. This option is required.
+
+
+
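+
+ For example, to remove osd.0 from the CRUSH map:
+
+ceph osd crush remove osd.0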
+
+
+
+
+ Move a Bucket
+
+ To move a bucket to a different location or position in the CRUSH map
+ hierarchy, execute the following:
+
+ceph osd crush move bucket-name bucket-type=bucket-name, ...
+
+
+ bucket-name
+
+
+ A string. The name of the bucket to move/reposition. This option is
+ required.
+
+
+
+
+ bucket-type
+
+
+ Key/value pairs. You may specify the bucket’s location in the CRUSH
+ hierarchy.
+
+
+
+
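+
+ For example, reusing the bucket names from the sample map above, you could
+ place the rack-1 bucket under the server-room-1 room bucket:
+
+ceph osd crush move rack-1 room=server-room-1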
+
+
+
+ Mixed SSDs and HDDs on the Same Node
+
+
+ It can be desirable to configure a Ceph cluster such that each node has a
+ mix of SSDs and HDDs, with one storage pool on the fast SSDs and one storage
+ pool on the slower HDDs. To do this, the CRUSH Map needs to be edited.
+
+
+
+ The default CRUSH Map will have a simple hierarchy, where the default root
+ contains hosts, and the hosts contain OSDs, for example:
+
+
+cephadm > ceph osd tree
+ID WEIGHT TYPE NAME UP/DOWN REWEIGHT
+ -1 0.18494 root default
+ -2 0.05548 host node1
+ 0 0.01849 osd.0 up 1.00000
+ 3 0.01849 osd.3 up 1.00000
+ 6 0.01849 osd.6 up 1.00000
+ -3 0.05548 host node2
+ 1 0.01849 osd.1 up 1.00000
+ 4 0.01849 osd.4 up 1.00000
+ 7 0.01849 osd.7 up 1.00000
+ -4 0.05548 host node3
+ 2 0.01849 osd.2 up 1.00000
+ 5 0.01849 osd.5 up 1.00000
+ 8 0.01849 osd.8 up 1.00000
+
+
+ This provides no distinction between disk types. In order to split the OSDs
+ into SSDs and HDDs, we need to create a second hierarchy in the CRUSH Map:
+
+
+cephadm > ceph osd crush add-bucket ssd root
+
+
+ Having created the new root for SSDs, we need to add hosts to it. This means
+ creating new host entries. But because the same host name cannot appear more
+ than once in a CRUSH Map, this uses fake host names. These fake host names
+ do not need to be resolvable by DNS. CRUSH does not care what the host names
+ are; they only need to create the right hierarchies. The one thing that
+ does need to be changed in order to support fake host
+ names is that you must set
+
+
+osd crush update on start = false
+
+
+ in /etc/ceph/ceph.conf. Otherwise the OSDs you move
+ will be reset later to their original location in the default root, and the
+ cluster will not behave as expected.
+
+
+
+ Once that setting is changed, add the new fake hosts to the SSD root:
+
+
+cephadm > ceph osd crush add-bucket node1-ssd host
+cephadm > ceph osd crush move node1-ssd root=ssd
+cephadm > ceph osd crush add-bucket node2-ssd host
+cephadm > ceph osd crush move node2-ssd root=ssd
+cephadm > ceph osd crush add-bucket node3-ssd host
+cephadm > ceph osd crush move node3-ssd root=ssd
+
+
+ Finally, for each SSD OSD, move the OSD to the SSD root. In this example, we
+ assume that osd.0, osd.1 and osd.2 are physically hosted on SSDs:
+
+
+cephadm > ceph osd crush add osd.0 1 root=ssd
+cephadm > ceph osd crush set osd.0 1 root=ssd host=node1-ssd
+cephadm > ceph osd crush add osd.1 1 root=ssd
+cephadm > ceph osd crush set osd.1 1 root=ssd host=node2-ssd
+cephadm > ceph osd crush add osd.2 1 root=ssd
+cephadm > ceph osd crush set osd.2 1 root=ssd host=node3-ssd
+
+
+ The CRUSH hierarchy should now look like this:
+
+
+cephadm > ceph osd tree
+ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
+-5 3.00000 root ssd
+-6 1.00000 host node1-ssd
+ 0 1.00000 osd.0 up 1.00000 1.00000
+-7 1.00000 host node2-ssd
+ 1 1.00000 osd.1 up 1.00000 1.00000
+-8 1.00000 host node3-ssd
+ 2 1.00000 osd.2 up 1.00000 1.00000
+-1 0.11096 root default
+-2 0.03699 host node1
+ 3 0.01849 osd.3 up 1.00000 1.00000
+ 6 0.01849 osd.6 up 1.00000 1.00000
+-3 0.03699 host node2
+ 4 0.01849 osd.4 up 1.00000 1.00000
+ 7 0.01849 osd.7 up 1.00000 1.00000
+-4 0.03699 host node3
+ 5 0.01849 osd.5 up 1.00000 1.00000
+ 8 0.01849 osd.8 up 1.00000 1.00000
+
+
+ Now, create a CRUSH rule that targets the SSD root:
+
+
+cephadm > ceph osd crush rule create-simple ssd_replicated_ruleset ssd host
+
+
+ The original default ruleset (with ID 0) will
+ target the HDDs. The new ssd_replicated_ruleset ruleset (with ID 1)
+ will target the SSDs.
+
+
+
+ Any existing pools will still be using the HDDs, because they are in the
+ default hierarchy in the CRUSH map. A new pool can be created to use SSDs
+ only:
+
+
+cephadm > ceph osd pool create ssd-pool 64 64
+cephadm > ceph osd pool set ssd-pool crush_ruleset 1
+
+
+ The ID "1" in the above command needs to match the ID of the new CRUSH role
+ which targets the SSDs.
+
+
+
+
+ Managing Storage Pools
+
+ When you first deploy a cluster without creating a pool, Ceph uses the
+ default pools for storing data. A pool provides you with:
+
+
+
+
+ Resilience: You can set how many OSDs are allowed to
+ fail without losing data. For replicated pools, it is the desired number of
+ copies/replicas of an object. A typical configuration stores an object and
+ one additional copy (that is size=2), but you can
+ determine the number of copies/replicas. For erasure coded pools, it is the
+ number of coding chunks (that is m=2 in the erasure
+ code profile).
+
+
+
+
+ Placement Groups: You can set the number of placement
+ groups for the pool. A typical configuration uses approximately 100
+ placement groups per OSD to provide optimal balancing without using up too
+ many computing resources. When setting up multiple pools, be careful to
+ ensure you set a reasonable number of placement groups for both the pool
+ and the cluster as a whole.
+
+
+
+
+ CRUSH Rules: When you store data in a pool, a CRUSH
+ ruleset mapped to the pool enables CRUSH to identify a rule for the
+ placement of the object and its replicas (or chunks for erasure coded
+ pools) in your cluster. You can create a custom CRUSH rule for your pool.
+
+
+
+
+ Snapshots: When you create snapshots with
+ ceph osd pool mksnap, you effectively take a snapshot of
+ a particular pool.
+
+
+
+
+ Set Ownership: You can set a user ID as the owner of a
+ pool.
+
+
+
+
+ To organize data into pools, you can list, create, and remove pools. You can
+ also view the usage statistics for each pool.
+
+
+ Operating Pools
+
+
+ This section introduces practical information to perform basic tasks with
+ pools. You can find out how to list, create, and delete pools, as well as
+ show pool statistics or manage snapshots of a pool.
+
+
+
+ List Pools
+
+ To list your cluster’s pools, execute:
+
+ceph osd lspools
+0 rbd, 1 photo_collection, 2 foo_pool,
+
+
+
+ Create a Pool
+
+ To create a replicated pool, execute:
+
+ceph osd pool create pool_name pg_num pgp_num pgp_type crush_ruleset_name expected_num_objects
+
+ To create an erasure pool, execute:
+
+ceph osd pool create pool_name pg_num pgp_num pgp_type erasure_code_profile \
+ crush_ruleset_name expected_num_objects
+
+
+ pool_name
+
+
+ The name of the pool. It must be unique. This option is required.
+
+
+
+
+ pg_num
+
+
+ The total number of placement groups for the pool. This option is
+ required. Default value is 8.
+
+
+
+
+ pgp_num
+
+
+ The total number of placement groups for placement purposes. This should
+ be equal to the total number of placement groups, except for placement
+ group splitting scenarios. This option is required. Default value is 8.
+
+
+
+
+ pgp_type
+
+
+ The pool type which may either be replicated to
+ recover from lost OSDs by keeping multiple copies of the objects or
+ erasure to get a kind of generalized RAID5
+ capability. The replicated pools require more raw storage but implement
+ all Ceph operations. The erasure pools require less raw storage but
+ only implement a subset of the available operations. Default is
+ 'replicated'.
+
+
+
+
+ crush_ruleset_name
+
+
+ The name of the crush ruleset for this pool. If the specified ruleset
+ does not exist, the creation of a replicated pool will fail with -ENOENT.
+ But a replicated pool will create a new erasure ruleset with the specified
+ name. The default value is 'erasure-code' for an erasure pool. Picks up the
+ Ceph configuration variable
+ for
+ replicated pool.
+
+
+
+
+ erasure_code_profile=profile
+
+
+ For erasure pools only. Use the erasure code profile. It must be an
+ existing profile as defined by osd erasure-code-profile
+ set.
+
+
+ When you create a pool, set the number of placement groups to a
+ reasonable value (for example 100). Consider the total number of
+ placement groups per OSD too. Placement groups are computationally
+ expensive, so performance will degrade when you have many pools with
+ many placement groups (for example 50 pools with 100 placement groups
+ each). The point of diminishing returns depends upon the power of the
+ OSD host.
+
+
+ See
+ Placement
+ Groups for details on calculating an appropriate number of
+ placement groups for your pool.
+
+
+
+
+ expected_num_objects
+
+
+ The expected number of objects for this pool. By setting this value, the
+ PG folder splitting happens at the pool creation time. This avoids the
+ latency impact with a runtime folder splitting.
+
+
+
+
+
+
+
+ Set Pool Quotas
+
+ You can set pool quotas for the maximum number of bytes and/or the maximum
+ number of objects per pool.
+
+ceph osd pool set-quota pool-name max_objects obj-count max_bytes bytes
+
+ For example:
+
+ceph osd pool set-quota data max_objects 10000
+
+ To remove a quota, set its value to 0.
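+
+ For example, to remove the object quota set above:
+
+ceph osd pool set-quota data max_objects 0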
+
+
+
+
+ Delete a Pool
+
+ To delete a pool, execute:
+
+ceph osd pool delete pool-name pool-name --yes-i-really-really-mean-it
+
+ If you created your own rulesets and rules for a pool you created, you
+ should consider removing them when you no longer need your pool. If you
+ created users with permissions strictly for a pool that no longer exists,
+ you should consider deleting those users too.
+
+
+
+
+ Rename a Pool
+
+ To rename a pool, execute:
+
+ceph osd pool rename current-pool-name new-pool-name
+
+ If you rename a pool and you have per-pool capabilities for an
+ authenticated user, you must update the user’s capabilities with the new
+ pool name.
+
+
+
+
+ Show Pool Statistics
+
+ To show a pool’s usage statistics, execute:
+
+rados df
+pool name category KB objects clones degraded unfound rd rd KB wr wr KB
+cold-storage - 228 1 0 0 0 0 0 1 228
+data - 1 4 0 0 0 0 0 4 4
+hot-storage - 1 2 0 0 0 15 10 5 231
+metadata - 0 0 0 0 0 0 0 0 0
+pool1 - 0 0 0 0 0 0 0 0 0
+rbd - 0 0 0 0 0 0 0 0 0
+total used 266268 7
+total avail 27966296
+total space 28232564
+
+
+
+ Set Pool Values
+
+ To set a value to a pool, execute:
+
+ceph osd pool set pool-name key value
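+
+ For example, to temporarily disable scrubbing on the data pool (the
+ noscrub key is described in the list below):
+
+ceph osd pool set data noscrub 1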
+
+ You may set values for the following keys:
+
+
+
+ size
+
+
+ Sets the number of replicas for objects in the pool. See
+ for further
+ details. Replicated pools only.
+
+
+
+
+ min_size
+
+
+ Sets the minimum number of replicas required for I/O. See
+ for further
+ details. Replicated pools only.
+
+
+
+
+ crash_replay_interval
+
+
+ The number of seconds to allow clients to replay acknowledged, but
+ uncommitted requests.
+
+
+
+
+ pg_num
+
+
+ The number of placement groups for the pool.
+
+
+
+
+ pgp_num
+
+
+ The effective number of placement groups to use when calculating data
+ placement.
+
+
+
+
+ crush_ruleset
+
+
+ The ruleset to use for mapping object placement in the cluster.
+
+
+
+
+ hashpspool
+
+
+ Set (1) or unset (0) the HASHPSPOOL flag on a given pool. Enabling this
+ flag changes the algorithm to better distribute PGs to OSDs. After
+ enabling this flag on a pool whose HASHPSPOOL flag was set to 0, the
+ cluster starts backfilling to have a correct placement of all PGs again.
+ Be aware that this can create quite a substantial I/O load on the cluster,
+ so plan carefully before enabling it on a highly loaded production cluster.
+
+
+
+
+ nodelete
+
+
+ Prevents the pool from being removed.
+
+
+
+
+ nopgchange
+
+
+ Prevents the pool's and
+ from being changed.
+
+
+
+
+ nosizechange
+
+
+ Prevents the pool's size from being changed.
+
+
+
+
+ write_fadvise_dontneed
+
+
+ Set/Unset the WRITE_FADVISE_DONTNEED flag on a given
+ pool.
+
+
+
+
+ noscrub,nodeep-scrub
+
+
+ Disables (deep)-scrubbing of the data for the specific pool to resolve
+ temporary high I/O load.
+
+
+
+
+ hit_set_type
+
+
+ Enables hit set tracking for cache pools. See
+ Bloom
+ Filter for additional information. This option can have the
+ following values: bloom,
+ explicit_hash, explicit_object.
+ Default is bloom.
+
+
+
+
+ hit_set_count
+
+
+ The number of hit sets to store for cache pools. The higher the number,
+ the more RAM consumed by the ceph-osd daemon.
+ Default is 0.
+
+
+
+
+ hit_set_period
+
+
+ The duration of a hit set period in seconds for cache pools. The higher
+ the number, the more RAM consumed by the
+ ceph-osd daemon.
+
+
+
+
+ hit_set_fpp
+
+
+ The false positive probability for the bloom hit set type. See
+ Bloom
+ Filter for additional information. Valid range is 0.0 - 1.0.
+ Default is 0.05.
+
+
+
+
+ cache_target_dirty_ratio
+
+
+ The percentage of the cache pool containing modified (dirty) objects
+ before the cache tiering agent will flush them to the backing storage
+ pool. Default is .4
+
+
+
+
+ cache_target_dirty_high_ratio
+
+
+ The percentage of the cache pool containing modified (dirty) objects
+ before the cache tiering agent will flush them to the backing storage
+ pool with a higher speed. Default is .6.
+
+
+
+
+ cache_target_full_ratio
+
+
+ The percentage of the cache pool containing unmodified (clean) objects
+ before the cache tiering agent will evict them from the cache pool.
+ Default is .8
+
+
+
+
+ target_max_bytes
+
+
+ Ceph will begin flushing or evicting objects when the
+ threshold is triggered.
+
+
+
+
+ target_max_objects
+
+
+ Ceph will begin flushing or evicting objects when the
+ threshold is triggered.
+
+
+
+
+ hit_set_grade_decay_rate
+
+
+ Temperature decay rate between two successive
+ hit_sets. Default is 20.
+
+
+
+
+ hit_set_grade_search_last_n
+
+
+ Count at most N appearances in
+ hit_sets for temperature calculation. Default is
+ 1.
+
+
+
+
+ cache_min_flush_age
+
+
+ The time (in seconds) before the cache tiering agent will flush an
+ object from the cache pool to the storage pool.
+
+
+
+
+ cache_min_evict_age
+
+
+ The time (in seconds) before the cache tiering agent will evict an
+ object from the cache pool.
+
+
+
+
+ fast_read
+
+
+ If this flag is enabled on erasure coding pools, then the read request
+ issues sub-reads to all shards, and waits until it receives enough
+ shards to decode to serve the client. In the case of
+ jerasure and isa erasure
+ plug-ins, when the first K replies return, then the
+ client’s request is served immediately using the data decoded from
+ these replies. This helps to gain some resources for better performance.
+ Currently, this flag is only supported for erasure coding pools. Default
+ is 0.
+
+
+
+
+ scrub_min_interval
+
+
+ The minimum interval in seconds for pool scrubbing when the cluster load
+ is low. The default 0 means that the
+ value from the Ceph
+ configuration file is used.
+
+
+
+
+ scrub_max_interval
+
+
+ The maximum interval in seconds for pool scrubbing, regardless of the
+ cluster load. The default 0 means that the
+ value from the Ceph
+ configuration file is used.
+
+
+
+
+ deep_scrub_interval
+
+
+ The interval in seconds for the pool deep
+ scrubbing. The default 0 means that the
+ value from the Ceph configuration file
+ is used.
+
+
+
+
+
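+ For example, to protect the data pool against accidental removal and to
+ temporarily disable scrubbing on it (the pool name and flag values are
+ only illustrative):
+
+ceph osd pool set data nodelete 1
+ceph osd pool set data noscrub 1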
+
+
+ Get Pool Values
+
+ To get a value from a pool, execute:
+
+ceph osd pool get pool-name key
+
+ You can get values for keys listed in
+ plus the following keys:
+
+
+
+ pg_num
+
+
+ The number of placement groups for the pool.
+
+
+
+
+ pgp_num
+
+
+ The effective number of placement groups to use when calculating data
+ placement. Valid range is equal to or less than
+ pg_num.
+
+
+
+
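+ For example, to read the replica count of the data pool (the pool name is
+ only an example):
+
+ceph osd pool get data size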
+
+
+
+ Set the Number of Object Replicas
+
+ To set the number of object replicas on a replicated pool, execute the
+ following:
+
+ceph osd pool set poolname size num-replicas
+
+
+ The num-replicas value includes the object itself. If
+ you, for example, want the object and two copies of it (a total of three
+ instances of the object), specify 3.
+
+
+
+ For example:
+
+ceph osd pool set data size 3
+
+ You may execute this command for each pool.
+
+
+
+ An object might accept I/Os in degraded mode with fewer than pool
+ size replicas. To set a minimum number of required replicas for
+ I/O, you should use the min_size setting. For example:
+
+ceph osd pool set data min_size 2
+
+ This ensures that no object in the data pool will receive I/O with fewer
+ than min_size replicas.
+
+
+
+
+
+ Get the Number of Object Replicas
+
+ To get the number of object replicas, execute the following:
+
+ceph osd dump | grep 'replicated size'
+
+ Ceph will list the pools, with the replicated size
+ attribute highlighted. By default, Ceph creates two replicas of an object
+ (a total of three copies, or a size of 3).
+
+
+
+
+
+ Snapshots
+
+ A snapshot is a read-only copy of the state of an object—a pool or an
+ image—at a particular point in time. This way you can retain a history
+ of its state. There are two types of snapshots in Ceph—RBD snapshots
+ and pool snapshots.
+
+
+ RBD Snapshots
+
+
+ An RBD snapshot is a snapshot of a RADOS block device image. With snapshots you
+ retain a history of the image’s state. Ceph also supports snapshot
+ layering, which allows you to clone VM images quickly and easily. Ceph
+ supports block device snapshots using the rbd command and
+ many higher level interfaces, including QEMU,
+ libvirt, OpenStack and CloudStack.
+
+
+
+
+ Stop input/output operations before snapshotting an image. If the image
+ contains a file system, the file system must be in a consistent state
+ before snapshotting.
+
+
+
+
+ Cephx Notes
+
+ When cephx is enabled (see
+
+ for more information), you must specify a user name or ID and a path to the
+ keyring containing the corresponding key for the user. See
+ User
+ Management for more details. You may also add the
+ CEPH_ARGS environment variable to avoid re-entry
+ of the following parameters.
+
+rbd --id user-ID --keyring=/path/to/secret commands
+rbd --name username --keyring=/path/to/secret commands
+
+ For example:
+
+rbd --id admin --keyring=/etc/ceph/ceph.keyring commands
+rbd --name client.admin --keyring=/etc/ceph/ceph.keyring commands
+
+
+ Add the user and secret to the CEPH_ARGS
+ environment variable so that you do not need to enter them each time.
+
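+ For example, in a shell session you could export the parameters once and
+ then run subsequent rbd commands without repeating them (reusing the admin
+ user and keyring path from the example above):
+
+export CEPH_ARGS="--id admin --keyring=/etc/ceph/ceph.keyring"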
+
+
+
+
+ Snapshot Basics
+
+ The following procedures demonstrate how to create, list, and remove
+ snapshots using the rbd command on the command line.
+
+
+ Create Snapshot
+
+ To create a snapshot with rbd, specify the snap create option, the pool name, and the image name.
+
+rbd --pool pool-name snap create --snap snap-name image-name
+rbd snap create pool-name/image-name@snap-name
+
+ For example:
+
+rbd --pool rbd snap create --snap snapshot1 image1
+rbd snap create rbd/image1@snapshot1
+
+
+ List Snapshots
+
+ To list snapshots of an image, specify the pool name and the image name.
+
+rbd --pool pool-name snap ls image-name
+rbd snap ls pool-name/image-name
+
+ For example:
+
+rbd --pool rbd snap ls image1
+rbd snap ls rbd/image1
+
+
+ Rollback Snapshot
+
+ To roll back to a snapshot with rbd, specify the
+ snap rollback option, the pool name, the image name, and
+ the snapshot name.
+
+rbd --pool pool-name snap rollback --snap snap-name image-name
+rbd snap rollback pool-name/image-name@snap-name
+
+ For example:
+
+rbd --pool pool1 snap rollback --snap snapshot1 image1
+rbd snap rollback pool1/image1@snapshot1
+
+
+ Rolling back an image to a snapshot means overwriting the current version
+ of the image with data from a snapshot. The time it takes to execute a
+ rollback increases with the size of the image. It is faster to
+ clone from a snapshot than to roll back an
+ image to a snapshot, and it is the preferred method of returning to a
+ pre-existing state.
+
+
+
+
+ Delete a Snapshot
+
+ To delete a snapshot with rbd, specify the snap rm option, the pool name, the image name, and the snapshot name.
+
+rbd --pool pool-name snap rm --snap snap-name image-name
+rbd snap rm pool-name/image-name@snap-name
+
+ For example:
+
+rbd --pool pool1 snap rm --snap snapshot1 image1
+rbd snap rm pool1/image1@snapshot1
+
+
+ Ceph OSDs delete data asynchronously, so deleting a snapshot does not
+ free up the disk space immediately.
+
+
+
+
+ Purge Snapshots
+
+ To delete all snapshots for an image with rbd, specify
+ the snap purge option and the image name.
+
+rbd --pool pool-name snap purge image-name
+rbd snap purge pool-name/image-name
+
+ For example:
+
+rbd --pool pool1 snap purge image1
+rbd snap purge pool1/image1
+
+
+
+
+ Layering
+
+ Ceph supports the ability to create many copy-on-write (COW) clones of a
+ block device snapshot. Snapshot layering enables Ceph block device
+ clients to create images very quickly. For example, you might create a
+ block device image with a Linux VM written to it; then, snapshot the image,
+ protect the snapshot, and create as many copy-on-write clones as you like.
+ A snapshot is read-only, so cloning a snapshot simplifies
+ semantics—making it possible to create clones rapidly.
+
+
+
+ The terms “parent” and “child” mentioned in the command line
+ examples below mean a Ceph block device snapshot (parent), and the
+ corresponding image cloned from the snapshot (child).
+
+
+
+ Each cloned image (child) stores a reference to its parent image, which
+ enables the cloned image to open the parent snapshot and read it.
+
+
+ A COW clone of a snapshot behaves exactly like any other Ceph block
+ device image. You can read from, write to, clone, and resize cloned images.
+ There are no special restrictions with cloned images. However, the
+ copy-on-write clone of a snapshot refers to the snapshot, so you
+ must protect the snapshot before you clone it.
+
+
+
+ Ceph only supports cloning for format 2 images
+ (that is, images created with rbd create --image-format 2).
+
+
+
+ Getting Started with Layering
+
+ Ceph block device layering is a simple process. You must have an image.
+ You must create a snapshot of the image. You must protect the snapshot.
+ Once you have performed these steps, you can begin cloning the snapshot.
+
+
+ The cloned image has a reference to the parent snapshot, and includes the
+ pool ID, image ID and snapshot ID. The inclusion of the pool ID means that
+ you may clone snapshots from one pool to images in another pool.
+
+
+
+
+ Image Template: A common use case for block device
+ layering is to create a master image and a snapshot that serves as a
+ template for clones. For example, a user may create an image for a Linux
+ distribution (for example SUSE Linux Enterprise Server), and create a snapshot for it.
+ Periodically, the user may update the image and create a new snapshot
+ (for example zypper ref && zypper patch
+ followed by rbd snap create). As the image matures,
+ the user can clone any one of the snapshots.
+
+
+
+
+ Extended Template: A more advanced use case
+ includes extending a template image that provides more information than
+ a base image. For example, a user may clone an image (a VM template) and
+ install other software (for example a database, a content management
+ system, an analytics system, etc.) and then snapshot the extended image,
+ which itself may be updated in the same way as the base image.
+
+
+
+
+ Template Pool: One way to use block device layering
+ is to create a pool that contains master images that act as templates,
+ and snapshots of those templates. You may then extend read-only
+ privileges to users so that they may clone the snapshots without the
+ ability to write or execute within the pool.
+
+
+
+
+ Image Migration/Recovery: One way to use block
+ device layering is to migrate or recover data from one pool into another
+ pool.
+
+
+
+
+
+ Protecting a Snapshot
+
+ Clones access the parent snapshots. All clones would break if a user
+ inadvertently deleted the parent snapshot. To prevent data loss, you need
+ to protect the snapshot before you can clone it.
+
+rbd --pool pool-name snap protect \
+ --image image-name --snap snapshot-name
+rbd snap protect pool-name/image-name@snapshot-name
+
+ For example:
+
+rbd --pool pool1 snap protect --image image1 --snap snapshot1
+rbd snap protect pool1/image1@snapshot1
+
+
+ You cannot delete a protected snapshot.
+
+
+
+
+ Cloning a Snapshot
+
+ To clone a snapshot, you need to specify the parent pool, image and
+ snapshot, the child pool and image name. You must protect the snapshot
+ before you can clone it.
+
+rbd clone --pool pool-name --image parent-image \
+ --snap snap-name --dest-pool pool-name \
+ --dest child-image
+rbd clone pool-name/parent-image@snap-name \
+ pool-name/child-image-name
+
+ For example:
+
+rbd clone pool1/image1@snapshot1 pool1/image2
+
+
+ You may clone a snapshot from one pool to an image in another pool. For
+ example, you may maintain read-only images and snapshots as templates in
+ one pool, and writable clones in another pool.
+
+
+
+
+ Unprotecting a Snapshot
+
+ Before you can delete a snapshot, you must unprotect it.
+ Additionally, you may not delete snapshots that have
+ references from clones. You must flatten each clone of a snapshot before
+ you can delete the snapshot.
+
+rbd --pool pool-name snap unprotect --image image-name \
+ --snap snapshot-name
+rbd snap unprotect pool-name/image-name@snapshot-name
+
+ For example:
+
+rbd --pool pool1 snap unprotect --image image1 --snap snapshot1
+rbd snap unprotect pool1/image1@snapshot1
+
+
+ Listing Children of a Snapshot
+
+ To list the children of a snapshot, execute the following:
+
+rbd --pool pool-name children --image image-name --snap snap-name
+rbd children pool-name/image-name@snapshot-name
+
+ For example:
+
+rbd --pool pool1 children --image image1 --snap snapshot1
+rbd children pool1/image1@snapshot1
+
+
+ Flattening a Cloned Image
+
+ Cloned images retain a reference to the parent snapshot. When you remove
+ the reference from the child clone to the parent snapshot, you effectively
+ “flatten” the image by copying the information from the snapshot to
+ the clone. The time it takes to flatten a clone increases with the size of
+ the snapshot. To delete a snapshot, you must flatten the child images
+ first.
+
+rbd --pool pool-name flatten --image image-name
+rbd flatten pool-name/image-name
+
+ For example:
+
+rbd --pool pool1 flatten --image image1
+rbd flatten pool1/image1
+
+
+ Since a flattened image contains all the information from the snapshot, a
+ flattened image will take up more storage space than a layered clone.
+
+
+
+
+
+
+ Pool Snapshots
+
+
+ Pool snapshots are snapshots of the state of the whole Ceph pool. With
+ pool snapshots, you can retain the history of the pool's state. Depending on
+ the pool's size, creating pool snapshots may require a lot of storage space.
+ Always check the related storage for enough disk space before creating a
+ snapshot of a pool.
+
+
+
+ Make a Snapshot of a Pool
+
+ To make a snapshot of a pool, execute:
+
+ceph osd pool mksnap pool-name snap-name
+
+ For example:
+
+ceph osd pool mksnap pool1 snapshot1
+created pool pool1 snap snapshot1
+
+
+
+ Remove a Snapshot of a Pool
+
+ To remove a snapshot of a pool, execute:
+
+ceph osd pool rmsnap pool-name snap-name
+
+
+
+
+ Erasure Coded Pools
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ A Ceph pool is associated with a type that determines how it sustains the
+ loss of an OSD (that is, a disk, since most of the time there is one OSD
+ per disk). The default choice
+ when creating a pool is replicated, meaning every object is copied on
+ multiple disks. The Erasure Code pool type can be used instead to save space.
+
+
+ For background information on Erasure Code, see
+ .
+
+
+
+ You cannot access erasure coded pools with the rbd interface unless you have
+ a cache tier configured. Refer to for
+ more details.
+
+
+
+ Creating a Sample Erasure Coded Pool
+
+
+ The simplest erasure coded pool is equivalent to RAID5 and requires at least
+ three hosts:
+
+
+> ceph osd pool create ecpool 12 12 erasure
+pool 'ecpool' created
+> echo ABCDEFGHI | rados --pool ecpool put NYAN -
+> rados --pool ecpool get NYAN -
+ABCDEFGHI
+
+
+ The 12 in the pool create command
+ stands for the number of placement groups.
+
+
+
+ Erasure Code Profiles
+
+
+ Some terminology hints:
+
+
+
+
+ chunk
+
+
+ when the encoding function is called, it returns chunks of the same size:
+ data chunks which can be concatenated to reconstruct the original object
+ and coding chunks which can be used to rebuild a lost chunk.
+
+
+
+
+ k
+
+
+ the number of data chunks, that is the number of chunks into which the
+ original object is divided. For example, if k = 2, a
+ 10KB object will be divided into 2 chunks of 5KB
+ each.
+
+
+
+
+ m
+
+
+ the number of coding chunks, that is the number of additional chunks
+ computed by the encoding functions. If there are 2 coding chunks, it
+ means 2 OSDs can be out without losing data.
+
+
+
+
+
+
+ The default erasure code profile sustains the loss of a single OSD. It is
+ equivalent to a replicated pool of size two but requires 1.5TB instead of
+ 2TB to store 1TB of data. The default profile can be displayed with:
+
+
+> ceph osd erasure-code-profile get default
+directory=.libs
+k=2
+m=1
+plugin=jerasure
+ruleset-failure-domain=host
+technique=reed_sol_van
+
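+ As a quick check of the space figures quoted above: with k=2 and m=1, each
+ object is stored as k+m = 3 chunks, each 1/k = 1/2 of the object size, so
+ storing 1TB of data consumes 1TB x (k+m)/k = 1TB x 3/2 = 1.5TB of raw space.
+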
+
+ Choosing the right profile is important because it cannot be modified after
+ the pool is created: a new pool with a different profile needs to be created
+ and all objects from the previous pool moved to the new one.
+
+
+
+ The most important parameters of the profile are k,
+ m and ruleset-failure-domain because
+ they define the storage overhead and the data durability. For example, if
+ the desired architecture must sustain the loss of two racks with a storage
+ overhead of 40%, the following profile can be defined:
+
+
+> ceph osd erasure-code-profile set myprofile \
+ k=3 \
+ m=2 \
+ ruleset-failure-domain=rack
+> ceph osd pool create ecpool 12 12 erasure myprofile
+> echo ABCDEFGHI | rados --pool ecpool put NYAN -
+> rados --pool ecpool get NYAN -
+ABCDEFGHI
+
+
+ The NYAN object will be divided into three (k=3) and two
+ additional chunks will be created (m=2). The value of
+ m defines how many OSDs can be lost simultaneously
+ without losing any data. The ruleset-failure-domain=rack
+ will create a CRUSH ruleset that ensures no two chunks are stored in the
+ same rack.
+
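+ To illustrate the chunking: with k=3 and m=2, a 9KB object (a size chosen
+ only for easy arithmetic) is split into three 3KB data chunks, and two
+ additional 3KB coding chunks are computed. Any two of the five OSDs holding
+ these chunks can therefore be lost without losing the object.
+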
+
+
+
+
+
+
+
+
+
+
+
+
+
+ For more information about the erasure code profiles, see
+ .
+
+
+
+ Erasure Coded Pool And Cache Tiering
+
+
+ Erasure coded pools require more resources than replicated pools and lack
+ some functionality, such as partial writes. To overcome these limitations,
+ it is recommended to set up a cache tier in front of the erasure coded pool.
+
+
+
+ For example, if the hot-storage pool is made of fast storage:
+
+
+> ceph osd tier add ecpool hot-storage
+> ceph osd tier cache-mode hot-storage writeback
+> ceph osd tier set-overlay ecpool hot-storage
+
+
+ This will place the hot-storage pool as a tier of ecpool in
+ write-back mode, so that every write and read to the ecpool actually uses
+ the hot-storage pool and benefits from its flexibility and speed.
+
+
+
+ It is not possible to create an RBD image on an erasure coded pool because
+ RBD requires partial writes. It is, however, possible to create an RBD image
+ on an erasure coded pool when a replicated pool is set up as its cache tier:
+
+
+> rbd --pool ecpool create --size 10 myvolume
+
+
+ For more information about cache tiering, see
+ .
+
+
+
+
+
+ Cache Tiering
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ A cache tier is an additional storage layer implemented
+ between the client and the standard storage. It is designed to speed up the
+ access to pools stored on slow hard disks and erasure coded pools.
+
+
+ Typically cache tiering involves creating a pool of relatively fast/expensive
+ storage devices (for example SSD drives) configured to act as a cache tier,
+ and a backing pool of slower and cheaper devices configured to act as a
+ storage tier.
+
+
+ Tiered Storage Terminology
+
+
+ Cache tiering recognizes two types of pools: a cache
+ pool and a storage pool.
+
+
+
+
+ For general information on pools, see .
+
+
+
+
+
+ storage pool
+
+
+ Either a standard replicated pool that stores several copies of an object
+ in the Ceph storage cluster, or an erasure coded pool (see
+ ).
+
+
+ The storage pool is sometimes referred to as a 'backing' or 'cold'
+ storage.
+
+
+
+
+ cache pool
+
+
+ A standard replicated pool stored on relatively small but fast storage
+ devices, with its own ruleset in the CRUSH Map.
+
+
+ The cache pool is also referred to as a 'hot' storage.
+
+
+
+
+
+
+ Points to Consider
+
+
+ Cache tiering may degrade the cluster performance for
+ specific workloads. The following points show some of its aspects you need
+ to consider:
+
+
+
+
+
+ Workload dependent: Whether a cache will improve
+ performance is dependent on the workload. Because there is a cost
+ associated with moving objects into or out of the cache, it can be more
+ effective when most of the requests touch a small number of objects. The
+ cache pool should be large enough to capture the working set for your
+ workload to avoid thrashing.
+
+
+
+
+ Difficult to benchmark: Most performance benchmarks
+ may show low performance with cache tiering. The reason is that they
+ request a big set of objects, and it takes a long time for the cache to
+ 'warm up'.
+
+
+
+
+ Possibly low performance: For workloads that are not
+ suitable for cache tiering, performance is often slower than a normal
+ replicated pool without cache tiering enabled.
+
+
+
+
+ librados object enumeration:
+ If your application is using librados directly
+ and relies on object enumeration, cache tiering may not work as expected.
+ (This is not a problem for RADOS Gateway, RBD, or CephFS.)
+
+
+
+
+
+ When to Use Cache Tiering
+
+
+ Consider using cache tiering in the following cases:
+
+
+
+
+
+ You need to access erasure coded pools via RADOS block device (RBD).
+
+
+
+
+ You need to access erasure coded pools via iSCSI as it inherits the
+ limitations of RBD. For more information on iSCSI, refer to
+ .
+
+
+
+
+ You have a limited amount of high-performance storage and a large
+ collection of low-performance storage, and need to access the stored data
+ faster.
+
+
+
+
+
+ Cache Modes
+
+
+ The cache tiering agent handles the migration of data between the cache tier
+ and the backing storage tier. Administrators have the ability to configure
+ how this migration takes place. There are two main scenarios:
+
+
+
+
+ write-back mode
+
+
+ When administrators configure tiers with write-back mode, Ceph clients
+ write data to the cache tier and receive an ACK from the cache tier. In
+ time, the data written to the cache tier migrates to the storage tier and
+ gets flushed from the cache tier. Conceptually, the cache tier is
+ overlaid in front of the backing storage tier. When a
+ Ceph client needs data that resides in the storage tier, the cache
+ tiering agent migrates the data to the cache tier on read, then it is
+ sent to the Ceph client. Thereafter, the Ceph client can perform I/O
+ using the cache tier, until the data becomes inactive. This is ideal for
+ mutable data such as photo or video editing, transactional data, etc.
+
+
+
+
+ read-only mode
+
+
+ When administrators configure tiers with read-only mode, Ceph clients
+ write data to the backing tier. On read, Ceph copies the requested
+ objects from the backing tier to the cache tier. Stale objects get
+ removed from the cache tier based on the defined policy. This approach is
+ ideal for immutable data such as presenting pictures or videos on a
+ social network, DNA data, X-ray imaging, etc., because reading data from
+ a cache pool that might contain out-of-date data provides weak
+ consistency. Do not use read-only mode for mutable data.
+
+
+
+
+
+
+ Setting Up an Example Tiered Storage
+
+
+ This section illustrates how to set up a fast SSD cache tier (hot-storage)
+ in front of a standard hard disk (cold-storage).
+
+
+
+
+ The following example is for illustration purposes only and includes a
+ setup with one root and one rule for the SSD part residing on a single
+ Ceph node.
+
+
+ In the production environment, the cluster setup typically includes more
+ root and rule entries for the hot storage, and also mixed nodes with both
+ SSDs and SATA disks.
+
+
+
+
+
+
+ Prepare a host machine with fast drives, such as SSDs. This cluster node
+ will act as a fast cache tier.
+
+
+
+
+ Turn the machine into a Ceph node. Install the software and configure
+ the host machine as described in
+ . Let us assume that
+ its name is node-4.
+
+
+
+
+ You need to create 4 OSDs on this node. For this purpose, run
+ ceph-deploy from the admin server (refer to
+ ). Remember to
+ replace node-4 with the actual node name and
+ device with the actual device name:
+
+cephadm > for d in a b c d; do
+ ceph-deploy osd create node-4:device${d}
+done
+
+ This may result in an entry like this in the CRUSH map:
+
+[...]
+host node-4 {
+ id -5 # do not change unnecessarily
+ # weight 0.012
+ alg straw
+ hash 0 # rjenkins1
+ item osd.6 weight 0.003
+ item osd.7 weight 0.003
+ item osd.8 weight 0.003
+ item osd.9 weight 0.003
+}
+[...]
+
+
+
+ Edit the CRUSH map for the hot-storage pool mapped to the OSDs backed by
+ the fast SSD drives. Define a second hierarchy with a root node for the
+ SSDs (as root ssd). Additionally, change the weight and add a
+ CRUSH rule for the SSDs. For more information on the CRUSH map, see
+ .
+
+
+ Edit the CRUSH map directly with command line tools such as
+ getcrushmap and crushtool:
+
+
+
+
+ Retrieve the current map and save it as c.map:
+
+sudo ceph osd getcrushmap -o c.map
+
+
+
+ Decompile c.map and save it as
+ c.txt:
+
+cephadm > crushtool -d c.map -o c.txt
+
+
+
+ Edit c.txt:
+
+[...]
+host node-4 {
+ id -5 # do not change unnecessarily
+ # weight 4.000
+ alg straw
+ hash 0 # rjenkins1
+ item osd.6 weight 1.000
+ item osd.7 weight 1.000
+ item osd.8 weight 1.000
+ item osd.9 weight 1.000
+}
+root ssd { # newly added root for the SSD hot-storage
+ id -6
+ alg straw
+ hash 0
+ item node-4 weight 4.00
+}
+rule ssd {
+ ruleset 4
+ type replicated
+ min_size 0
+ max_size 4
+ step take ssd
+ step chooseleaf firstn 0 type host
+ step emit
+}
+[...]
+
+
+
+ Compile the edited c.txt file and save it as
+ ssd.map:
+
+cephadm > crushtool -c c.txt -o ssd.map
+
+
+
+ Finally install ssd.map as the new CRUSH map:
+
+sudo ceph osd setcrushmap -i ssd.map
+
+
+
+
+
+ Create the hot-storage pool to be used for cache tiering. Use the new
+ 'ssd' rule for it:
+
+sudo ceph osd pool create hot-storage 100 100 replicated ssd
+
+
+
+ Create the cold-storage pool using the default 'replicated_ruleset' rule:
+
+sudo ceph osd pool create cold-storage 100 100 replicated replicated_ruleset
+
+
+
+ Set up the cache tier by associating a backing storage pool
+ with a cache pool, in this case cold-storage (the storage pool) with
+ hot-storage (the cache pool):
+
+sudo ceph osd tier add cold-storage hot-storage
+
+
+
+ To set the cache mode to writeback, execute the following:
+
+sudo ceph osd tier cache-mode hot-storage writeback
+
+ For more information about cache modes, see
+ .
+
+
+ Writeback cache tiers overlay the backing storage tier, so they require
+ one additional step: you must direct all client traffic from the storage
+ pool to the cache pool. To direct client traffic to the cache
+ pool, execute the following:
+
+sudo ceph osd tier set-overlay cold-storage hot-storage
+
+
+
+
+ Configuring a Cache Tier
+
+ There are several options you can use to configure cache tiers. Use the
+ following syntax:
+
+sudo ceph osd pool set cachepool key value
+
+ Target Size and Type
+
+ Ceph's production cache tiers use a Bloom Filter for the
+ hit_set_type:
+
+sudo ceph osd pool set cachepool hit_set_type bloom
+
+ The hit_set_count and hit_set_period values
+ define how much time each HitSet should cover, and how many such HitSets
+ to store.
+
+sudo ceph osd pool set cachepool hit_set_count 12
+sudo ceph osd pool set cachepool hit_set_period 14400
+sudo ceph osd pool set cachepool target_max_bytes 1000000000000
+
+
+ A larger hit_set_count results in more RAM consumed by
+ the ceph-osd process.
+
+
+
+ The min_read_recency_for_promote value defines how many HitSets
+ to check for the existence of an object when handling a read operation.
+ The checking result is used to decide whether to promote the object
+ asynchronously. Its value should be between 0 and
+ hit_set_count. If set to 0, the object is always
+ promoted. If set to 1, the current HitSet is checked. And if this object
+ is in the current HitSet, it is promoted, otherwise not. For the other
+ values, the exact number of archive HitSets are checked. The object is
+ promoted if the object is found in any of the most recent
+ HitSets.
+
+
+ You can set a similar parameter, min_write_recency_for_promote,
+ for the write operation:
+
+sudo ceph osd pool set cachepool min_read_recency_for_promote 2
+sudo ceph osd pool set cachepool min_write_recency_for_promote 2
+
+
+ The longer the period and the higher the
+ min_read_recency_for_promote and min_write_recency_for_promote
+ values, the more RAM the
+ ceph-osd daemon consumes. In
+ particular, when the agent is active to flush or evict cache objects, all
+ HitSets are loaded into RAM.
+
+
+
+
+ Cache Sizing
+
+ The cache tiering agent performs two main functions:
+
+
+
+ Flushing
+
+
+ The agent identifies modified (or dirty) objects and forwards them to
+ the storage pool for long-term storage.
+
+
+
+
+ Evicting
+
+
+ The agent identifies objects that have not been modified (or clean) and
+ evicts the least recently used among them from the cache.
+
+
+
+
+
+ Absolute Sizing
+
+ The cache tiering agent can flush or evict objects based upon the total
+ number of bytes or the total number of objects. To specify a maximum
+ number of bytes, execute the following:
+
+sudo ceph osd pool set cachepool target_max_bytes num_of_bytes
+
+ To specify the maximum number of objects, execute the following:
+
+sudo ceph osd pool set cachepool target_max_objects num_of_objects
+
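+ For example, to cap the hot-storage cache pool from the earlier setup at one
+ million objects (the value is only illustrative):
+
+sudo ceph osd pool set hot-storage target_max_objects 1000000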
+
+ Ceph is not able to determine the size of a cache pool automatically,
+ so configuring the absolute size is required here; otherwise,
+ flushing and evicting will not work. If you specify both limits, the cache
+ tiering agent will begin flushing or evicting when either threshold is
+ triggered.
+
+
+
+
+ All client requests will be blocked only when
+ target_max_bytes or target_max_objects is
+ reached.
+
+
+
+
+ Relative Sizing
+
+ The cache tiering agent can flush or evict objects relative to the size
+ of the cache pool (specified by target_max_bytes /
+ target_max_objects in
+ ). When the cache pool
+ contains a certain percentage of modified (or dirty) objects, the
+ cache tiering agent will flush them to the storage pool. To set the
+ cache_target_dirty_ratio, execute the following:
+
+sudo ceph osd pool set cachepool cache_target_dirty_ratio 0.0...1.0
+
+ For example, setting the value to 0.4 will begin flushing modified
+ (dirty) objects when they reach 40% of the cache pool's capacity:
+
+sudo ceph osd pool set hot-storage cache_target_dirty_ratio 0.4
+
+ When the dirty objects reach a certain percentage of the cache pool's
+ capacity, the agent flushes dirty objects at a higher speed. Use
+ cache_target_dirty_high_ratio:
+
+sudo ceph osd pool set cachepool cache_target_dirty_high_ratio 0.0..1.0
+
+ When the cache pool reaches a certain percentage of its capacity, the
+ cache tiering agent will evict objects to maintain free capacity. To set
+ the cache_target_full_ratio, execute the following:
+
+sudo ceph osd pool set cachepool cache_target_full_ratio 0.0..1.0
+
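+ For example, to start evicting clean objects when the hot-storage pool is
+ 80% full (matching the default of .8 described earlier):
+
+sudo ceph osd pool set hot-storage cache_target_full_ratio 0.8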
+
+
+ Cache Age
+
+ You can specify the minimum age of an object before the cache tiering
+ agent flushes a recently modified (or dirty) object to the backing storage
+ pool:
+
+sudo ceph osd pool set cachepool cache_min_flush_age num_of_seconds
+
+ You can specify the minimum age of an object before it will be evicted
+ from the cache tier:
+
+sudo ceph osd pool set cachepool cache_min_evict_age num_of_seconds
+
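+ For example, to keep objects in the hot-storage pool for at least 10 minutes
+ before they may be flushed and at least 30 minutes before they may be
+ evicted (the values are only illustrative):
+
+sudo ceph osd pool set hot-storage cache_min_flush_age 600
+sudo ceph osd pool set hot-storage cache_min_evict_age 1800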
+
+
+
+
+
+ Accessing Cluster Data
+
+
+ Ceph RADOS Gateway
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ Ceph RADOS Gateway is an object storage interface built on top of
+ librgw to provide applications with a RESTful
+ gateway to Ceph Storage Clusters. Ceph Object Storage supports two
+ interfaces:
+
+
+
+
+ S3-compatible: Provides object storage functionality
+ with an interface that is compatible with a large subset of the Amazon S3
+ RESTful API.
+
+
+
+
+ Swift-compatible: Provides object storage
+ functionality with an interface that is compatible with a large subset of
+ the OpenStack Swift API.
+
+
+
+
+ Ceph Object Storage uses the Ceph RADOS Gateway daemon
+ (radosgw), which uses an embedded HTTP server
+ (CivetWeb) for interacting with a Ceph Storage Cluster. Since it provides
+ interfaces compatible with OpenStack Swift and Amazon S3, the Ceph RADOS Gateway
+ has its own user management. Ceph RADOS Gateway can store data in the same Ceph
+ Storage Cluster used to store data from Ceph File System clients or Ceph
+ Block Device clients. The S3 and Swift APIs share a common name space, so you
+ may write data with one API and retrieve it with the other.
+
+
+ This section helps you install and manage the Ceph RADOS Gateway (RADOS Gateway). You can
+ either choose to use the ceph-deploy tool, or do the
+ installation and management manually.
+
+
+
+ Before installing RADOS Gateway, you need to have the Ceph cluster installed first
+ (see for more information).
+
+
+
+ Managing RADOS Gateway with ceph-deploy
+
+
+ This section describes how to install and configure RADOS Gateway with
+ ceph-deploy.
+
+
+
+ Installation
+
+ The ceph-deploy script includes the
+ rgw component that helps you manage the RADOS Gateway creation
+ and operation.
+
+
+ Install Ceph
+
+ Before running ceph-deploy rgw as suggested in the
+ following step, make sure that Ceph together with the object gateway
+ package is correctly installed on the node where you want to set up RADOS Gateway:
+
+ ceph-deploy install --rgw short_rgw_hostname
+
+
+
+ Prepare and activate the nodes in one step. You can specify several pairs
+ of
+ short_hostname:gateway_name
+ to install RADOS Gateway on a required number of nodes.
+
+ceph-deploy --overwrite-conf rgw create \
+ short_hostname:gateway_name ...
+
+ For example:
+
+ceph-deploy --overwrite-conf rgw create ceph-node1:rgw.gateway1
+
+ You now have a working RADOS Gateway on the specified nodes, and you need to give
+ access to a client. For more information, see
+ .
+
+
+
+
+ Listing RADOS Gateway Installations
+
+ To list all RADOS Gateway instances within the Ceph cluster, run:
+
+ceph-deploy rgw list
+
+
+
+ Removing RADOS Gateway from a Node
+
+ To remove a RADOS Gateway installation from the node where it was previously
+ installed, run:
+
+ceph-deploy --overwrite-conf rgw delete \
+ short_hostname:gatewayname ...
+
+ For example:
+
+ceph-deploy --overwrite-conf rgw delete ceph-node1:rgw.gateway1
+
+
+ You need a copy of the local ceph.conf file, in your
+ current working directory. If you do not have a copy of it, copy it from
+ your cluster.
+
+
+
+
+
+ Managing RADOS Gateway Manually
+
+
+ This section describes how to install and configure RADOS Gateway manually.
+
+
+
+ Installation
+
+
+
+ Install RADOS Gateway. The following command installs all required components:
+
+sudo zypper ref && sudo zypper in ceph-radosgw
+
+
+
+ If the Apache server from the previous RADOS Gateway instance is running, stop it
+ and disable the relevant service:
+
+sudo systemctl stop apache2.service
+sudo systemctl disable apache2.service
+
+
+
+ Edit /etc/ceph/ceph.conf and add the following
+ lines:
+
+[client.rgw.gateway]
+ rgw frontends = "civetweb port=80"
+
+
+ If you want to configure RADOS Gateway/CivetWeb for use with SSL encryption,
+ modify the line accordingly:
+
+rgw frontends = civetweb port=7480s ssl_certificate=path_to_certificate.pem
+
+
+
+
+ Restart the RADOS Gateway service. See for
+ more information.
+
+
+
+
+
+
+ Configuring RADOS Gateway
+
+ Several steps are required to configure a RADOS Gateway.
+
+
+ Basic Configuration
+
+ Configuring a Ceph RADOS Gateway requires a running Ceph Storage Cluster. The
+ Ceph RADOS Gateway is a client of the Ceph Storage Cluster. As a Ceph
+ Storage Cluster client, it requires:
+
+
+
+
+ A host name for the gateway instance, for example
+ gateway.
+
+
+
+
+ A storage cluster user name with appropriate permissions and a keyring.
+
+
+
+
+ Pools to store its data.
+
+
+
+
+ A data directory for the gateway instance.
+
+
+
+
+ An instance entry in the Ceph Configuration file.
+
+
+
+
+ Each instance must have a user name and key to communicate with a Ceph
+ storage cluster. In the following steps, we use a monitor node to create a
+ bootstrap keyring, then create the RADOS Gateway instance user keyring based on
+ the bootstrap one. Then, we create a client user name and key. Next, we
+ add the key to the Ceph Storage Cluster. Finally, we distribute the
+ keyring to the node containing the gateway instance.
+
+
+
+
+ Create a keyring for the gateway:
+
+sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.rgw.keyring
+sudo chmod +r /etc/ceph/ceph.client.rgw.keyring
+
+
+
+ Generate a Ceph RADOS Gateway user name and key for each instance. As an
+ example, we will use the name gateway after
+ client.rgw:
+
+sudo ceph-authtool /etc/ceph/ceph.client.rgw.keyring \
+ -n client.rgw.gateway --gen-key
+
+
+
+ Add capabilities to the key:
+
+sudo ceph-authtool -n client.rgw.gateway --cap osd 'allow rwx' \
+ --cap mon 'allow rwx' /etc/ceph/ceph.client.rgw.keyring
+
+
+
+ Once you have created a keyring and key to enable the Ceph Object
+ Gateway with access to the Ceph Storage Cluster, add the key to your
+ Ceph Storage Cluster. For example:
+
+sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.rgw.gateway \
+ -i /etc/ceph/ceph.client.rgw.keyring
+
+
+
+ Distribute the keyring to the node with the gateway instance:
+
+sudo scp /etc/ceph/ceph.client.rgw.keyring ceph@hostname:/home/ceph
+ssh hostname
+sudo mv ceph.client.rgw.keyring /etc/ceph/ceph.client.rgw.keyring
+
+
+
+ Use Bootstrap Keyring
+
+ An alternative way is to create the RADOS Gateway bootstrap keyring, and then
+ create the RADOS Gateway keyring from it:
+
+
+
+
+ Create a RADOS Gateway bootstrap keyring on one of the monitor nodes:
+
+sudo ceph \
+ auth get-or-create client.bootstrap-rgw mon 'allow profile bootstrap-rgw' \
+ --connect-timeout=25 \
+ --cluster=ceph \
+ --name mon. \
+ --keyring=/var/lib/ceph/mon/ceph-node_host/keyring \
+ -o /var/lib/ceph/bootstrap-rgw/keyring
+
+
+
+ Create the
+ /var/lib/ceph/radosgw/ceph-rgw_name
+ directory for storing the bootstrap keyring:
+
+sudo mkdir \
+/var/lib/ceph/radosgw/ceph-rgw_name
+
+
+
+ Create a RADOS Gateway keyring from the newly created bootstrap keyring:
+
+sudo ceph \
+ auth get-or-create client.rgw.rgw_name osd 'allow rwx' mon 'allow rw' \
+ --connect-timeout=25 \
+ --cluster=ceph \
+ --name client.bootstrap-rgw \
+ --keyring=/var/lib/ceph/bootstrap-rgw/keyring \
+ -o /var/lib/ceph/radosgw/ceph-rgw_name/keyring
+
+
+
+ Copy the RADOS Gateway keyring to the RADOS Gateway host:
+
+sudo scp \
+/var/lib/ceph/radosgw/ceph-rgw_name/keyring \
+rgw_host:/var/lib/ceph/radosgw/ceph-rgw_name/keyring
+
+
+
+
+
+ Create Pools (Optional)
+
+ Ceph RADOS Gateways require Ceph Storage Cluster pools to store specific
+ gateway data. If the user you created has proper permissions, the gateway
+ will create the pools automatically. However, ensure that you have set an
+ appropriate default number of placement groups per pool in the Ceph
+ configuration file.
+
+
+ When configuring a gateway with the default region and zone, the naming
+ convention for pools typically uses 'default' for region and zone naming,
+ but you can use any naming convention you prefer:
+
+.rgw.root
+default.rgw.control
+default.rgw.data.root
+default.rgw.gc
+default.rgw.log
+default.rgw.users.uid
+default.rgw.users.email
+default.rgw.users.keys
+default.rgw.meta
+default.rgw.users.swift
+
+ To create the pools manually, see
+ .
+
+
+
+ Adding Gateway Configuration to Ceph
+
+ Add the Ceph RADOS Gateway configuration to the Ceph Configuration file. The
+ Ceph RADOS Gateway configuration requires you to identify the Ceph RADOS Gateway
+ instance. Then, specify the host name where you installed the Ceph RADOS Gateway
+ daemon, a keyring (for use with cephx), and optionally a log file. For
+ example:
+
+[client.rgw.instance-name]
+host = hostname
+keyring = /etc/ceph/ceph.client.rgw.keyring
+
+ RADOS Gateway Log File
+
+ To override the default RADOS Gateway log file, include the following:
+
+log file = /var/log/radosgw/client.rgw.instance-name.log
+
+
+ The [client.rgw.*] portion of the gateway instance
+ identifies this portion of the Ceph configuration file as configuring a
+ Ceph Storage Cluster client where the client type is a Ceph RADOS Gateway
+ (radosgw). The instance name follows. For example:
+
+[client.rgw.gateway]
+host = ceph-gateway
+keyring = /etc/ceph/ceph.client.rgw.keyring
+
+
+ The host must be your machine host name,
+ excluding the domain name.
+
+
+
+ Then turn off print continue. If you have it set to
+ true, you may encounter problems with PUT operations:
+
+rgw print continue = false
+
+
+ To use a Ceph RADOS Gateway with subdomain S3 calls (for example
+ http://bucketname.hostname), you must add the Ceph
+ RADOS Gateway DNS name under the [client.rgw.gateway] section
+ of the Ceph configuration file:
+
+[client.rgw.gateway]
+...
+rgw dns name = hostname
+
+ You should also consider installing a DNS server such as Dnsmasq on your
+ client machine(s) when using the
+ http://bucketname.hostname
+ syntax. The dnsmasq.conf file should include the
+ following settings:
+
+address=/hostname/host-ip-address
+listen-address=client-loopback-ip
+
+ Then, add the client-loopback-ip IP address as
+ the first DNS server on the client machine(s).
+
+
+
+ Redeploy Ceph Configuration
+
+ Use ceph-deploy to push a new copy of the configuration
+ to the hosts in your cluster:
+
+ceph-deploy config push host-name [host-name]...
+
+
+ Create Data Directory
+
+ Deployment scripts may not create the default Ceph RADOS Gateway data directory.
+ Create data directories for each instance of a radosgw daemon if not
+ already done. The host variables in the Ceph
+ configuration file determine which host runs each instance of a radosgw
+ daemon. The typical form specifies the radosgw daemon, the cluster name
+ and the daemon ID.
+
+sudo mkdir -p /var/lib/ceph/radosgw/cluster-id
+
+ Using the example ceph.conf settings above, you would execute the
+ following:
+
+sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.gateway
+
+
+ Restart Services and Start the Gateway
+
+ To ensure that all components have reloaded their configurations, we
+ recommend restarting your Ceph Storage Cluster service. Then, start up
+ the radosgw service. For more information, see
+ and
+ .
+
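+ A minimal sketch, assuming the instance name gateway from the examples above
+ and that the whole cluster can be restarted via the ceph.target unit:
+
+sudo systemctl restart ceph.target
+sudo systemctl start ceph-radosgw@rgw.gateway
+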
+
+ After the service is up and running, you can make an anonymous GET request
+ to see if the gateway returns a response. A simple HTTP request to the
+ domain name should return the following:
+
+<ListAllMyBucketsResult>
+ <Owner>
+ <ID>anonymous</ID>
+ <DisplayName/>
+ </Owner>
+ <Buckets/>
+</ListAllMyBucketsResult>
+
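+ One way to issue such an anonymous request from the command line, assuming
+ the gateway listens on port 80 of a host named gateway-host (a placeholder
+ name):
+
+curl http://gateway-host
+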
+
+
+
+ Operating the RADOS Gateway Service
+
+
+ The RADOS Gateway service is operated with the systemctl command. You
+ need to have root privileges to operate the RADOS Gateway service. Note that
+ gateway_host is the host name of the server whose
+ RADOS Gateway instance you need to operate.
+
+
+
+ The following subcommands are supported for the RADOS Gateway service:
+
+
+
+
+ systemctl status ceph-radosgw@rgw.gateway_host
+
+
+
+ Prints the status information of the service.
+
+
+
+
+ systemctl start ceph-radosgw@rgw.gateway_host
+
+
+
+ Starts the service if it is not already running.
+
+
+
+
+ systemctl restart ceph-radosgw@rgw.gateway_host
+
+
+
+ Restarts the service.
+
+
+
+
+ systemctl stop ceph-radosgw@rgw.gateway_host
+
+
+
+ Stops the running service.
+
+
+
+
+ systemctl enable ceph-radosgw@rgw.gateway_host
+
+
+
+ Enables the service so that it is automatically started on system
+ start-up.
+
+
+
+
+ systemctl disable ceph-radosgw@rgw.gateway_host
+
+
+
+ Disables the service so that it is not automatically started on system
+ start-up.
+
+
+
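+ For example, to check whether the gateway on a host named gateway1 (a
+ placeholder name) is running, execute as root:
+
+systemctl status ceph-radosgw@rgw.gateway1
+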
+
+
+
+ Managing RADOS Gateway Access
+
+
+ You can communicate with RADOS Gateway using either S3- or Swift-compatible
+ interface. Both interfaces require you to create a specific user, and
+ install the relevant client software to communicate with the gateway using
+ the user's secret key.
+
+
+
+ For an introduction and a few practical examples on RADOS Gateway access, see
+ .
+
+
+
+ Managing S3 Access
+
+ The S3 interface is compatible with a large subset of the Amazon S3 RESTful
+ API.
+
+
+
+ S3cmd is a command line S3 client. You can find it in the
+ OpenSUSE
+ Build Service. The repository contains versions for both SUSE Linux Enterprise and
+ openSUSE based distributions.
+
+
+
+ Adding Users
+
+ See .
+
+
+
+ Removing Users
+
+ See .
+
+
+
+ Changing User Passwords
+
+ See .
+
+
+
+ Setting Quotas
+
+ See .
+
+
+
+
+
+ Managing Swift Access
+
+ The Swift interface is compatible with a large subset of the OpenStack Swift
+ API.
+
+
+ Adding Users
+
+ See .
+
+
+
+ Removing Users
+
+ See .
+
+
+
+ Changing Passwords
+
+ See .
+
+
+
+
+
+
+
+ Multisite Object Storage Gateways
+
+
+ You can configure each RADOS Gateway to participate in a federated architecture,
+ working in an active zone configuration while allowing for writes to
+ non-master zones.
+
+
+
+ Terminology
+
+ A description of terms specific to a federated architecture follows:
+
+
+
+ Zone
+
+
+ A logical grouping of one or more RADOS Gateway instances. There must be one
+ zone designated as the master zone in a
+ zonegroup, which handles all bucket and user
+ creation.
+
+
+
+
+ Zonegroup
+
+
+ A zonegroup consists of multiple zones. There should be a master
+ zonegroup that will handle changes to the system configuration.
+
+
+
+
+ Zonegroup map
+
+
+ A configuration structure that holds the map of the entire system, for
+ example which zonegroup is the master, relationships between different
+ zonegroups, and certain configuration options such as storage policies.
+
+
+
+
+ Realm
+
+
+ A container for zonegroups. This allows for separation of zonegroups
+ between clusters. It is possible to create multiple realms, making it
+ easier to run completely different configurations in the same cluster.
+
+
+
+
+ Period
+
+
+ A period holds the configuration structure for the current state of the
+ realm. Every period contains a unique ID and an epoch. Every realm has
+ an associated current period, holding the current state of configuration
+ of the zonegroups and storage policies. Any configuration change for a
+ non-master zone will increment the period's epoch. Changing the master
+ zone to a different zone will trigger the following changes:
+
+
+
+
+ A new period is generated with a new period ID and epoch of 1.
+
+
+
+
+ Realm's current period is updated to point to the newly generated
+ period ID.
+
+
+
+
+ Realm's epoch is incremented.
+
+
+
+
+
+
+
+
+
+ Example Cluster Setup
+
+ In this example, we will focus on creating a single zone group with three
+ separate zones, which actively synchronize their data. Two zones belong to
+ the same cluster, while the third belongs to a different one. There is no
+ synchronization agent involved in mirroring data changes between the
+ RADOS Gateways. This allows for a much simpler configuration scheme and
+ active-active configurations. Note that metadata operations—such as
+ creating a new user—still need to go through the master zone.
+ However, data operations—such as creation of buckets and
+ objects—can be handled by any of the zones.
+
+
+
+
+ System Keys
+
+ While configuring zones, RADOS Gateway expects creation of an S3-compatible system
+ user together with their access and secret keys. This allows another RADOS Gateway
+ instance to pull the configuration remotely with the access and secret
+ keys. For more information on creating S3 users, see
+ .
+
+
+
+ It is useful to generate the access and secret keys before the zone
+ creation itself because it makes scripting and use of configuration
+ management tools easier later on.
+
+
+
+ For the purpose of this example, let us assume that the access and secret
+ keys are set in the environment variables:
+
+# SYSTEM_ACCESS_KEY=1555b35654ad1656d805
+# SYSTEM_SECRET_KEY=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+ Generally, access keys consist of 20 alphanumeric characters, while secret
+ keys consist of 40 alphanumeric characters (they can contain +/= characters
+ as well). You can generate these keys in the command line:
+
+# SYSTEM_ACCESS_KEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+# SYSTEM_SECRET_KEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1)
+
+
+
+ Naming Conventions
+
+ This example describes the process of setting up a master zone. We will
+ assume a zonegroup called us spanning the United States,
+ which will be our master zonegroup. This will contain two zones written in
+ a zonegroup-zone
+ format. This is our convention only and you can choose a format that you
+ prefer. In summary:
+
+
+
+
+ Master zonegroup: United States us
+
+
+
+
+ Master zone: United States, East Region 1: us-east-1
+
+
+
+
+ Secondary zone: United States, East Region 2:
+ us-east-2
+
+
+
+
+ Secondary zone: United States, West Region: us-west
+
+
+
+
+ This will be a part of a larger realm named gold. The
+ us-east-1 and us-east-2 zones are
+ part of the same Ceph cluster, us-east-1 being the
+ primary one. us-west is in a different Ceph cluster.
+
+
+
+
+ Default Pools
+
+ When configured with the appropriate permissions, RADOS Gateway creates default
+ pools on its own. The pg_num and
+ pgp_num values are taken from the
+ ceph.conf configuration file. Pools related to a zone
+ by default follow the convention of
+ zone-name.pool-name.
+ For example for the us-east-1 zone, it will be the
+ following pools:
+
+.rgw.root
+us-east-1.rgw.control
+us-east-1.rgw.data.root
+us-east-1.rgw.gc
+us-east-1.rgw.log
+us-east-1.rgw.intent-log
+us-east-1.rgw.usage
+us-east-1.rgw.users.keys
+us-east-1.rgw.users.email
+us-east-1.rgw.users.swift
+us-east-1.rgw.users.uid
+us-east-1.rgw.buckets.index
+us-east-1.rgw.buckets.data
+us-east-1.rgw.meta
+
+ These pools can be created in other zones as well, by replacing
+ us-east-1 with the appropriate zone name.
+
+
+
+
+ Creating a Realm
+
+ Configure a realm called gold and make it the default
+ realm:
+
+cephadm > radosgw-admin realm create --rgw-realm=gold --default
+{
+ "id": "4a367026-bd8f-40ee-b486-8212482ddcd7",
+ "name": "gold",
+ "current_period": "09559832-67a4-4101-8b3f-10dfcd6b2707",
+ "epoch": 1
+}
+
+ Note that every realm has an ID, which allows for flexibility such as
+ renaming the realm later if needed. The current_period
+ changes whenever we change anything in the master zone. The
+ epoch is incremented when there is a change in the
+ master zone's configuration which results in a change of the current
+ period.
+
+
+
+
+ Deleting the Default Zonegroup
+
+ The default installation of RADOS Gateway creates the default zonegroup called
+ default. Because we no longer need the default
+ zonegroup, remove it.
+
+cephadm > radosgw-admin zonegroup delete --rgw-zonegroup=default
+
+
+
+ Creating a Master Zonegroup
+
+ Create a master zonegroup called us. The zonegroup will
+ manage the zonegroup map and propagate changes to the rest of the system.
+ By marking the zonegroup as default, you can omit specifying the
+ rgw-zonegroup switch in later commands.
+
+cephadm > radosgw-admin zonegroup create --rgw-zonegroup=us \
+--endpoints=http://rgw1:80 --master --default
+{
+ "id": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "name": "us",
+ "api_name": "us",
+ "is_master": "true",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "hostnames": [],
+ "hostnames_s3website": [],
+ "master_zone": "",
+ "zones": [],
+ "placement_targets": [],
+ "default_placement": "",
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7"
+}
+
+ Alternatively, you can mark a zonegroup as default with the following
+ command:
+
+cephadm > radosgw-admin zonegroup default --rgw-zonegroup=us
+
+
+
+ Creating a Master Zone
+
+ Now create a default zone and add it to the default zonegroup. Note that
+ you will use this zone for metadata operations such as user creation:
+
+cephadm > radosgw-admin zone create --rgw-zonegroup=us --rgw-zone=us-east-1 \
+--endpoints=http://rgw1:80 --access-key=$SYSTEM_ACCESS_KEY --secret=$SYSTEM_SECRET_KEY
+{
+ "id": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "name": "us-east-1",
+ "domain_root": "us-east-1/gc.rgw.data.root",
+ "control_pool": "us-east-1/gc.rgw.control",
+ "gc_pool": "us-east-1/gc.rgw.gc",
+ "log_pool": "us-east-1/gc.rgw.log",
+ "intent_log_pool": "us-east-1/gc.rgw.intent-log",
+ "usage_log_pool": "us-east-1/gc.rgw.usage",
+ "user_keys_pool": "us-east-1/gc.rgw.users.keys",
+ "user_email_pool": "us-east-1/gc.rgw.users.email",
+ "user_swift_pool": "us-east-1/gc.rgw.users.swift",
+ "user_uid_pool": "us-east-1/gc.rgw.users.uid",
+ "system_key": {
+ "access_key": "1555b35654ad1656d804",
+ "secret_key": "h7GhxuBLTrlhVUyxSPUKUV8r\/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+ },
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "us-east-1/gc.rgw.buckets.index",
+ "data_pool": "us-east-1/gc.rgw.buckets.data",
+ "data_extra_pool": "us-east-1/gc.rgw.buckets.non-ec",
+ "index_type": 0
+ }
+ }
+ ],
+ "metadata_heap": "us-east-1/gc.rgw.meta",
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7"
+}
+
+ Note that the --rgw-zonegroup and --default
+ switches add the zone to a zonegroup and make it
+ the default zone. Alternatively, the same can also be done with the
+ following commands:
+
+cephadm > radosgw-admin zone default --rgw-zone=us-east-1
+cephadm > radosgw-admin zonegroup add --rgw-zonegroup=us --rgw-zone=us-east-1
+
+ Creating System Users
+
+ To access zone pools, you need to create a system user. Note that you will
+ need these keys when configuring the secondary zone as well.
+
+cephadm > radosgw-admin user create --uid=zone.user \
+--display-name="Zone User" --access-key=$SYSTEM_ACCESS_KEY \
+--secret=$SYSTEM_SECRET_KEY --system
+
+
+ Update the Period
+
+ Because you changed the master zone configuration, you need to commit the
+ changes for them to take effect in the realm configuration structure.
+ Initially, the period looks like this:
+
+cephadm > radosgw-admin period get
+{
+ "id": "09559832-67a4-4101-8b3f-10dfcd6b2707", "epoch": 1, "predecessor_uuid": "", "sync_status": [], "period_map":
+ {
+ "id": "09559832-67a4-4101-8b3f-10dfcd6b2707", "zonegroups": [], "short_zone_ids": []
+ }, "master_zonegroup": "", "master_zone": "", "period_config":
+ {
+ "bucket_quota": {
+ "enabled": false, "max_size_kb": -1, "max_objects": -1
+ }, "user_quota": {
+ "enabled": false, "max_size_kb": -1, "max_objects": -1
+ }
+ }, "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7", "realm_name": "gold", "realm_epoch": 1
+}
+
+ Update the period and commit the changes:
+
+cephadm > radosgw-admin period update --commit
+{
+ "id": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "epoch": 1,
+ "predecessor_uuid": "09559832-67a4-4101-8b3f-10dfcd6b2707",
+ "sync_status": [ "[...]"
+ ],
+ "period_map": {
+ "id": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "zonegroups": [
+ {
+ "id": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "name": "us",
+ "api_name": "us",
+ "is_master": "true",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "hostnames": [],
+ "hostnames_s3website": [],
+ "master_zone": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "zones": [
+ {
+ "id": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "name": "us-east-1",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "log_meta": "true",
+ "log_data": "false",
+ "bucket_index_max_shards": 0,
+ "read_only": "false"
+ }
+ ],
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": []
+ }
+ ],
+ "default_placement": "default-placement",
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7"
+ }
+ ],
+ "short_zone_ids": [
+ {
+ "key": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "val": 630926044
+ }
+ ]
+ },
+ "master_zonegroup": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "master_zone": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "period_config": {
+ "bucket_quota": {
+ "enabled": false,
+ "max_size_kb": -1,
+ "max_objects": -1
+ },
+ "user_quota": {
+ "enabled": false,
+ "max_size_kb": -1,
+ "max_objects": -1
+ }
+ },
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7",
+ "realm_name": "gold",
+ "realm_epoch": 2
+}
+
+
+ Start the RADOS Gateway
+
+ You need to specify the RADOS Gateway zone and port options in the configuration
+ file before starting the RADOS Gateway. For more information on RADOS Gateway and its
+ configuration, see . The configuration
+ section of RADOS Gateway should look similar to this:
+
+[client.rgw.us-east-1]
+rgw_frontends="civetweb port=80"
+rgw_zone=us-east-1
+
+ Start the RADOS Gateway:
+
+sudo systemctl start ceph-radosgw@rgw.us-east-1
+
+
+
+
+ Creating a Secondary Zone
+
+ In the same cluster, create and configure the secondary zone named
+ us-east-2. You can execute all the following commands on
+ the node hosting the master zone itself.
+
+
+ To create the secondary zone, use the same command as when you created the
+ primary zone, except dropping the master flag:
+
+cephadm > radosgw-admin zone create --rgw-zonegroup=us --endpoints=http://rgw2:80 \
+--rgw-zone=us-east-2 --access-key=$SYSTEM_ACCESS_KEY --secret=$SYSTEM_SECRET_KEY
+{
+ "id": "950c1a43-6836-41a2-a161-64777e07e8b8",
+ "name": "us-east-2",
+ "domain_root": "us-east-2.rgw.data.root",
+ "control_pool": "us-east-2.rgw.control",
+ "gc_pool": "us-east-2.rgw.gc",
+ "log_pool": "us-east-2.rgw.log",
+ "intent_log_pool": "us-east-2.rgw.intent-log",
+ "usage_log_pool": "us-east-2.rgw.usage",
+ "user_keys_pool": "us-east-2.rgw.users.keys",
+ "user_email_pool": "us-east-2.rgw.users.email",
+ "user_swift_pool": "us-east-2.rgw.users.swift",
+ "user_uid_pool": "us-east-2.rgw.users.uid",
+ "system_key": {
+ "access_key": "1555b35654ad1656d804",
+ "secret_key": "h7GhxuBLTrlhVUyxSPUKUV8r\/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+ },
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "us-east-2.rgw.buckets.index",
+ "data_pool": "us-east-2.rgw.buckets.data",
+ "data_extra_pool": "us-east-2.rgw.buckets.non-ec",
+ "index_type": 0
+ }
+ }
+ ],
+ "metadata_heap": "us-east-2.rgw.meta",
+ "realm_id": "815d74c2-80d6-4e63-8cfc-232037f7ff5c"
+}
+
+ Update the Period
+
+ Inform all the gateways of the new change in the system map by doing a
+ period update and committing the changes:
+
+cephadm > radosgw-admin period update --commit
+{
+ "id": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "epoch": 2,
+ "predecessor_uuid": "09559832-67a4-4101-8b3f-10dfcd6b2707",
+ "sync_status": [ "[...]"
+ ],
+ "period_map": {
+ "id": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "zonegroups": [
+ {
+ "id": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "name": "us",
+ "api_name": "us",
+ "is_master": "true",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "hostnames": [],
+ "hostnames_s3website": [],
+ "master_zone": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "zones": [
+ {
+ "id": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "name": "us-east-1",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "log_meta": "true",
+ "log_data": "false",
+ "bucket_index_max_shards": 0,
+ "read_only": "false"
+ },
+ {
+ "id": "950c1a43-6836-41a2-a161-64777e07e8b8",
+ "name": "us-east-2",
+ "endpoints": [
+ "http:\/\/rgw2:80"
+ ],
+ "log_meta": "false",
+ "log_data": "true",
+ "bucket_index_max_shards": 0,
+ "read_only": "false"
+ }
+
+ ],
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": []
+ }
+ ],
+ "default_placement": "default-placement",
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7"
+ }
+ ],
+ "short_zone_ids": [
+ {
+ "key": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "val": 630926044
+ },
+ {
+ "key": "950c1a43-6836-41a2-a161-64777e07e8b8",
+ "val": 4276257543
+ }
+
+ ]
+ },
+ "master_zonegroup": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "master_zone": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "period_config": {
+ "bucket_quota": {
+ "enabled": false,
+ "max_size_kb": -1,
+ "max_objects": -1
+ },
+ "user_quota": {
+ "enabled": false,
+ "max_size_kb": -1,
+ "max_objects": -1
+ }
+ },
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7",
+ "realm_name": "gold",
+ "realm_epoch": 2
+}
+
+
+ Start the RADOS Gateway
+
+ Adjust the configuration of the RADOS Gateway for the secondary zone, and start
+ it:
+
+[client.rgw.us-east-2]
+rgw_frontends="civetweb port=80"
+rgw_zone=us-east-2
+cephadm > sudo systemctl start ceph-radosgw@rgw.us-east-2
+
+
+
+
+ Adding RADOS Gateway to the Second Cluster
+
+ The second Ceph cluster belongs to the same zonegroup as the initial one,
+ but may be geographically located elsewhere.
+
+
+ Default Realm and Zonegroup
+
+ Since you already created the realm for the first gateway, pull the realm
+ to this cluster and make it the default:
+
+cephadm > radosgw-admin realm pull --url=http://rgw1:80 \
+--access-key=$SYSTEM_ACCESS_KEY --secret=$SYSTEM_SECRET_KEY
+{
+ "id": "4a367026-bd8f-40ee-b486-8212482ddcd7",
+ "name": "gold",
+ "current_period": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "epoch": 2
+}
+cephadm > radosgw-admin realm default --rgw-realm=gold
+
+ Get the configuration from the master zone by pulling the period:
+
+cephadm > radosgw-admin period pull --url=http://rgw1:80 \
+--access-key=$SYSTEM_ACCESS_KEY --secret=$SYSTEM_SECRET_KEY
+
+ Set the default zonegroup to the already created us
+ zonegroup:
+
+cephadm > radosgw-admin zonegroup default --rgw-zonegroup=us
+
+
+ Secondary Zone Configuration
+
+ Create a new zone named us-west with the same system
+ keys:
+
+cephadm > radosgw-admin zone create --rgw-zonegroup=us --rgw-zone=us-west \
+--access-key=$SYSTEM_ACCESS_KEY --secret=$SYSTEM_SECRET_KEY \
+--endpoints=http://rgw3:80 --default
+{
+ "id": "950c1a43-6836-41a2-a161-64777e07e8b8",
+ "name": "us-west",
+ "domain_root": "us-west.rgw.data.root",
+ "control_pool": "us-west.rgw.control",
+ "gc_pool": "us-west.rgw.gc",
+ "log_pool": "us-west.rgw.log",
+ "intent_log_pool": "us-west.rgw.intent-log",
+ "usage_log_pool": "us-west.rgw.usage",
+ "user_keys_pool": "us-west.rgw.users.keys",
+ "user_email_pool": "us-west.rgw.users.email",
+ "user_swift_pool": "us-west.rgw.users.swift",
+ "user_uid_pool": "us-west.rgw.users.uid",
+ "system_key": {
+ "access_key": "1555b35654ad1656d804",
+ "secret_key": "h7GhxuBLTrlhVUyxSPUKUV8r\/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+ },
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "us-west.rgw.buckets.index",
+ "data_pool": "us-west.rgw.buckets.data",
+ "data_extra_pool": "us-west.rgw.buckets.non-ec",
+ "index_type": 0
+ }
+ }
+ ],
+ "metadata_heap": "us-west.rgw.meta",
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7"
+}
+
+
+ Update the Period
+
+ To propagate the zonegroup map changes, update and commit the period:
+
+cephadm > radosgw-admin period update --commit --rgw-zone=us-west
+{
+ "id": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "epoch": 3,
+ "predecessor_uuid": "09559832-67a4-4101-8b3f-10dfcd6b2707",
+ "sync_status": [
+ "", # truncated
+ ],
+ "period_map": {
+ "id": "b5e4d3ec-2a62-4746-b479-4b2bc14b27d1",
+ "zonegroups": [
+ {
+ "id": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "name": "us",
+ "api_name": "us",
+ "is_master": "true",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "hostnames": [],
+ "hostnames_s3website": [],
+ "master_zone": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "zones": [
+ {
+ "id": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "name": "us-east-1",
+ "endpoints": [
+ "http:\/\/rgw1:80"
+ ],
+ "log_meta": "true",
+ "log_data": "true",
+ "bucket_index_max_shards": 0,
+ "read_only": "false"
+ },
+ {
+ "id": "950c1a43-6836-41a2-a161-64777e07e8b8",
+ "name": "us-east-2",
+ "endpoints": [
+ "http:\/\/rgw2:80"
+ ],
+ "log_meta": "false",
+ "log_data": "true",
+ "bucket_index_max_shards": 0,
+ "read_only": "false"
+ },
+ {
+ "id": "d9522067-cb7b-4129-8751-591e45815b16",
+ "name": "us-west",
+ "endpoints": [
+ "http:\/\/rgw3:80"
+ ],
+ "log_meta": "false",
+ "log_data": "true",
+ "bucket_index_max_shards": 0,
+ "read_only": "false"
+ }
+ ],
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": []
+ }
+ ],
+ "default_placement": "default-placement",
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7"
+ }
+ ],
+ "short_zone_ids": [
+ {
+ "key": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "val": 630926044
+ },
+ {
+ "key": "950c1a43-6836-41a2-a161-64777e07e8b8",
+ "val": 4276257543
+ },
+ {
+ "key": "d9522067-cb7b-4129-8751-591e45815b16",
+ "val": 329470157
+ }
+ ]
+ },
+ "master_zonegroup": "d4018b8d-8c0d-4072-8919-608726fa369e",
+ "master_zone": "83859a9a-9901-4f00-aa6d-285c777e10f0",
+ "period_config": {
+ "bucket_quota": {
+ "enabled": false,
+ "max_size_kb": -1,
+ "max_objects": -1
+ },
+ "user_quota": {
+ "enabled": false,
+ "max_size_kb": -1,
+ "max_objects": -1
+ }
+ },
+ "realm_id": "4a367026-bd8f-40ee-b486-8212482ddcd7",
+ "realm_name": "gold",
+ "realm_epoch": 2
+}
+
+ Note that the period epoch number has incremented, indicating a change in
+ the configuration.
+
+
+
+ Start the RADOS Gateway
+
+ This is similar to starting the RADOS Gateway in the first zone. The only
+ difference is that the RADOS Gateway zone configuration should reflect the
+ us-west zone name:
+
+[client.rgw.us-west]
+rgw_frontends="civetweb port=80"
+rgw_zone=us-west
+
+ Start the second RADOS Gateway:
+
+sudo systemctl start ceph-radosgw@rgw.us-west
+
+
+
+
+
+
+
+ Ceph iSCSI Gateway
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ iSCSI is a storage area network (SAN) protocol that allows clients (called
+ initiators) to send SCSI commands to SCSI storage
+ devices (targets) on remote servers. SUSE Enterprise Storage includes
+ a facility that opens Ceph storage management to heterogeneous clients,
+ such as Microsoft Windows* and VMware* vSphere, through the iSCSI protocol. Multipath
+ iSCSI access enables availability and scalability for these clients, and the
+ standardized iSCSI protocol also provides an additional layer of security
+ isolation between clients and the SUSE Enterprise Storage cluster. The configuration
+ facility is named lrbd. Using
+ lrbd, Ceph storage administrators can define
+ thin-provisioned, replicated, highly-available volumes supporting read-only
+ snapshots, read-write clones, and automatic resizing with Ceph RADOS Block
+ Device (RBD). Administrators can then export volumes either via a single
+ lrbd gateway host, or via multiple gateway hosts
+ supporting multipath failover. Linux, Microsoft Windows, and VMware hosts can connect
+ to volumes using the iSCSI protocol, which makes them available like any
+ other SCSI block device. This means SUSE Enterprise Storage customers can effectively run a
+ complete block-storage infrastructure subsystem on Ceph that provides all
+ features and benefits of a conventional SAN, enabling future growth.
+
+
+ This chapter provides detailed information on setting up a Ceph cluster
+ infrastructure together with an iSCSI gateway so that client hosts can
+ use remotely stored data as local storage devices using the iSCSI protocol.
+
+
+ iSCSI Block Storage
+
+
+ iSCSI is an implementation of the Small Computer System Interface (SCSI)
+ command set using the Internet Protocol (IP), specified in RFC 3720. iSCSI
+ is implemented as a service where a client (the initiator) talks to a server
+ (the target) via a session on TCP port 3260. An iSCSI target's IP address
+ and port are called an iSCSI portal, where a target can be exposed through
+ one or more portals. The combination of a target and one or more portals is
+ called the target portal group (TPG).
+
+
+
+ The underlying data link layer protocol for iSCSI is commonly Ethernet. More
+ specifically, modern iSCSI infrastructures use 10 Gigabit Ethernet or faster
+ networks for optimal throughput. 10 Gigabit Ethernet connectivity between
+ the iSCSI gateway and the back-end Ceph cluster is strongly recommended.
+
+
+
+ The Linux Kernel iSCSI Target
+
+ The Linux kernel iSCSI target was originally named LIO for linux-iscsi.org,
+ the project's original domain and Web site. For some time, no fewer than 4
+ competing iSCSI target implementations were available for the Linux
+ platform, but LIO ultimately prevailed as the single iSCSI reference
+ target. The mainline kernel code for LIO uses the simple, but somewhat
+ ambiguous name "target", distinguishing between "target core" and a variety
+ of front-end and back-end target modules.
+
+
+ The most commonly used front-end module is arguably iSCSI. However, LIO
+ also supports Fibre Channel (FC), Fibre Channel over Ethernet (FCoE) and
+ several other front-end protocols. At this time, only the iSCSI protocol is
+ supported by SUSE Enterprise Storage.
+
+
+ The most frequently used target back-end module is one that is capable of
+ simply re-exporting any available block device on the target host. This
+ module is named iblock. However, LIO also has an RBD-specific back-end
+ module supporting parallelized multipath I/O access to RBD images.
+
+
+
+
+ iSCSI Initiators
+
+ This section provides brief information on the iSCSI initiators used on
+ Linux, Microsoft Windows, and VMware platforms.
+
+
+ Linux
+
+ The standard initiator for the Linux platform is
+ open-iscsi. open-iscsi
+ launches a daemon, iscsid, which the user can
+ then use to discover iSCSI targets on any given portal, log in to targets,
+ and map iSCSI volumes. iscsid communicates with
+ the SCSI mid layer to create in-kernel block devices that the kernel can
+ then treat like any other SCSI block device on the system. The
+ open-iscsi initiator can be deployed in
+ conjunction with the Device Mapper Multipath
+ (dm-multipath) facility to provide a highly
+ available iSCSI block device.
+
+
+
+ Microsoft Windows and Hyper-V
+
+ The default iSCSI initiator for the Microsoft Windows operating system is the
+ Microsoft iSCSI initiator. The iSCSI service can be configured via a
+ graphical user interface (GUI), and supports multipath I/O for high
+ availability.
+
+
+
+ VMware
+
+ The default iSCSI initiator for VMware vSphere and ESX is the VMware
+ ESX software iSCSI initiator, vmkiscsi. When
+ enabled, it can be configured either from the vSphere client, or using the
+ vmkiscsi-tool command. You can then format storage
+ volumes connected through the vSphere iSCSI storage adapter with VMFS, and
+ use them like any other VM storage device. The VMware initiator also
+ supports multipath I/O for high availability.
+
+
+
+
+
+ General Information about lrbd
+
+
+ lrbd combines the benefits of RADOS Block Devices
+ with the ubiquitous versatility of iSCSI. By employing
+ lrbd on an iSCSI target host (known as the
+ lrbd gateway), any application that needs to make
+ use of block storage can benefit from Ceph, even if it does not speak any
+ Ceph client protocol. Instead, users can use iSCSI or any other target
+ front-end protocol to connect to an LIO target, which translates all target
+ I/O to RBD storage operations.
+
+
+
+
+
+ lrbd is inherently highly-available and supports
+ multipath operations. Thus, downstream initiator hosts can use multiple
+ iSCSI gateways for both high availability and scalability. When
+ communicating with an iSCSI configuration with more than one gateway,
+ initiators may load-balance iSCSI requests across multiple gateways. In the
+ event of a gateway failing, being temporarily unreachable, or being disabled
+ for maintenance, I/O will transparently continue via another gateway.
+
+
+
+
+
+ Deployment Considerations
+
+
+ A minimum configuration of SUSE Enterprise Storage with lrbd
+ consists of the following components:
+
+
+
+
+
+ A Ceph storage cluster. The Ceph cluster consists of a minimum of four
+ physical servers hosting at least eight object storage daemons (OSDs)
+ each. In such a configuration, three OSD nodes also double as a monitor
+ (MON) host.
+
+
+
+
+ An iSCSI target server running the LIO iSCSI target, configured via
+ lrbd.
+
+
+
+
+ An iSCSI initiator host, running open-iscsi
+ (Linux), the Microsoft iSCSI Initiator (Microsoft Windows), or any other compatible
+ iSCSI initiator implementation.
+
+
+
+
+
+ A recommended production configuration of SUSE Enterprise Storage with
+ lrbd consists of:
+
+
+
+
+
+ A Ceph storage cluster. A production Ceph cluster consists of any
+ number of (typically more than 10) OSD nodes, each typically running 10-12
+ object storage daemons (OSDs), with no fewer than three dedicated MON
+ hosts.
+
+
+
+
+ Several iSCSI target servers running the LIO iSCSI target, configured via
+ lrbd. For iSCSI fail-over and load-balancing,
+ these servers must run a kernel supporting the
+ target_core_rbd module. Update packages are
+ available from the SUSE Linux Enterprise Server maintenance channel.
+
+
+
+
+ Any number of iSCSI initiator hosts, running
+ open-iscsi (Linux), the Microsoft iSCSI Initiator
+ (Microsoft Windows), or any other compatible iSCSI initiator implementation.
+
+
+
+
+
+ Installation and Configuration
+
+
+ This section describes the steps to install and configure an iSCSI gateway on top
+ of SUSE Enterprise Storage.
+
+
+
+ Install SUSE Enterprise Storage and Deploy a Ceph Cluster
+
+ Before you start installing and configuring an iSCSI gateway, you need to
+ install SUSE Enterprise Storage and deploy a Ceph cluster as described in
+ .
+
+
+
+
+ Installing the ceph_iscsi Pattern
+
+ On your designated iSCSI target server nodes, install the
+ ceph_iscsi pattern. Doing so will automatically
+ install lrbd, the necessary Ceph binaries and
+ libraries, and the targetcli command line tool:
+
+sudo zypper in -t pattern ceph_iscsi
+
+ Repeat this step on any node that you want to act as a fail-over or
+ load-balancing target server node.
+
+
+
+
+ Create RBD Images
+
+ RBD images are created in the Ceph store and subsequently exported to
+ iSCSI. We recommend that you use a dedicated RADOS pool for this purpose.
+ You can create a volume from any host that is able to connect to your
+ storage cluster using the Ceph rbd command line
+ utility. This requires the client to have at least a minimal ceph.conf
+ configuration file, and appropriate CephX authentication credentials.
+
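+ The exact contents of ceph.conf depend on your cluster. As a minimal
+ illustrative sketch (the monitor host name is hypothetical), a client-side
+ configuration might look like this:
+
+[global]
+mon_host = mon1.example.com
+keyring = /etc/ceph/ceph.client.admin.keyring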
+
+ To create a new volume for subsequent export via iSCSI, use the
+ rbd create command, specifying the volume size in
+ megabytes. For example, to create a 100 GB volume named
+ testvol in the pool named iscsi, run:
+
+rbd --pool iscsi create --size=102400 testvol
+
+ The above command creates an RBD volume in the default format 2.
+
+
+
+ Since SUSE Enterprise Storage 3, the default volume format is 2, and format 1 is
+ deprecated. However, you can still create the deprecated format 1 volumes
+ with the option.
+
+
+
+
+
+ Export RBD Images via iSCSI
+
+ To export RBD images via iSCSI, use the lrbd
+ utility. lrbd allows you to create, review, and
+ modify the iSCSI target configuration, which uses a JSON format.
+
+
+ To edit the configuration, use lrbd -e or
+ lrbd --edit. This command will invoke the default
+ editor, as defined by the EDITOR environment variable.
+ You may override this behavior by setting the option in
+ addition to .
+
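+ For example, assuming you prefer nano as your editor (any
+ editor defined via EDITOR works), you could start an
+ editing session like this:
+
+sudo EDITOR=nano lrbd --edit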
+
+ Below is an example configuration for
+
+
+
+
+ two iSCSI gateway hosts named iscsi1.example.com and
+ iscsi2.example.com,
+
+
+
+
+ defining a single iSCSI target with an iSCSI Qualified Name (IQN) of
+ iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol,
+
+
+
+
+ with a single iSCSI Logical Unit (LU),
+
+
+
+
+ backed by an RBD image named testvol in the RADOS pool
+ rbd,
+
+
+
+
+ and exporting the target via two portals named "east" and "west":
+
+
+
+{
+ "auth": [
+ {
+ "target": "iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol",
+ "authentication": "none"
+ }
+ ],
+ "targets": [
+ {
+ "target": "iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol",
+ "hosts": [
+ {
+ "host": "iscsi1.example.com",
+ "portal": "east"
+ },
+ {
+ "host": "iscsi2.example.com",
+ "portal": "west"
+ }
+ ]
+ }
+ ],
+ "portals": [
+ {
+ "name": "east",
+ "addresses": [
+ "192.168.124.104"
+ ]
+ },
+ {
+ "name": "west",
+ "addresses": [
+ "192.168.124.105"
+ ]
+ }
+ ],
+ "pools": [
+ {
+ "pool": "rbd",
+ "gateways": [
+ {
+ "target": "iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol",
+ "tpg": [
+ {
+ "image": "testvol"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ Note that whenever you refer to a host name in the configuration, this host
+ name must match the iSCSI gateway's uname -n command
+ output.
+
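+ To verify which host name to use, run the following on each gateway host
+ (the output shown is an example matching the configuration above):
+
+uname -n
+iscsi1.example.com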
+
+ The edited JSON is stored in the extended attributes (xattrs) of a single
+ RADOS object per pool. This object is available to the gateway hosts where
+ the JSON is edited, and all gateway hosts connected to the same Ceph
+ cluster. No configuration information is stored locally on the
+ lrbd gateway.
+
+
+ To activate the configuration, store it in the Ceph cluster, and do one
+ of the following things (as root):
+
+
+
+
+ Run the lrbd command (without additional options) from
+ the command line,
+
+
+
+
+ or
+
+
+
+
+ Restart the lrbd service with service
+ lrbd restart.
+
+
+
+
+ The lrbd "service" does not operate any background
+ daemon. Instead, it simply invokes the lrbd command.
+ This type of service is known as a "one-shot" service.
+
+
+ You should also enable lrbd to auto-configure on
+ system start-up. To do so, run the systemctl enable lrbd
+ command.
+
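+ For example, on each gateway host:
+
+sudo systemctl enable lrbd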
+
+ The configuration above reflects a simple, one-gateway setup.
+ lrbd configuration can be much more complex and
+ powerful. The lrbd RPM package comes with an
+ extensive set of configuration examples, which you may refer to by checking
+ the contents of the
+ /usr/share/doc/packages/lrbd/samples directory after
+ installation. The samples are also available from
+ .
+
+
+
+
+ Optional Settings
+
+ The following settings may be useful for some environments. For images,
+ there are uuid, lun, retries, sleep, and retry_errors attributes. The
+ first two, uuid and lun, allow hard-coding of the 'uuid' or 'lun' for a
+ specific image. You can specify either of them for an image. The retries,
+ sleep, and retry_errors attributes affect the attempts to map an RBD
+ image.
+
+"pools": [
+ {
+ "pool": "rbd",
+ "gateways": [
+ {
+ "host": "igw1",
+ "tpg": [
+ {
+ "image": "archive",
+ "uuid": "12345678-abcd-9012-efab-345678901234",
+ "lun": "2",
+ "retries": "3",
+ "sleep": "4",
+ "retry_errors": [ 95 ],
+ [...]
+ }
+ ]
+ }
+ ]
+ }
+]
+
+
+
+ Advanced Settings
+
+ lrbd can be configured with advanced parameters
+ which are subsequently passed on to the LIO I/O target. The parameters are
+ divided up into iSCSI and backing store components, which can then be
+ specified in the "targets" and "tpg" sections, respectively, of the
+ lrbd configuration.
+
+
+
+ Changing these parameters from the default setting is not recommended.
+
+
+"targets": [
+ {
+ [...]
+ "tpg_default_cmdsn_depth": "64",
+ "tpg_default_erl": "0",
+ "tpg_login_timeout": "10",
+ "tpg_netif_timeout": "2",
+ "tpg_prod_mode_write_protect": "0",
+ }
+]
+
+ Description of the options follows:
+
+
+
+ tpg_default_cmdsn_depth
+
+
+ Default CmdSN (Command Sequence Number) depth. Limits the number of
+ requests that an iSCSI initiator can have outstanding at any moment.
+
+
+
+
+ tpg_default_erl
+
+
+ Default error recovery level.
+
+
+
+
+ tpg_login_timeout
+
+
+ Login timeout value in seconds.
+
+
+
+
+ tpg_netif_timeout
+
+
+ NIC failure timeout in seconds.
+
+
+
+
+ tpg_prod_mode_write_protect
+
+
+ If set to 1, prevent writes to LUNs.
+
+
+
+
+"pools": [
+ {
+ "pool": "rbd",
+ "gateways": [
+ {
+ "host": "igw1",
+ "tpg": [
+ {
+ "image": "archive",
+ "backstore_block_size": "512",
+ "backstore_emulate_3pc": "1",
+ "backstore_emulate_caw": "1",
+ "backstore_emulate_dpo": "0",
+ "backstore_emulate_fua_read": "0",
+ "backstore_emulate_fua_write": "1",
+ "backstore_emulate_model_alias": "0",
+ "backstore_emulate_rest_reord": "0",
+ "backstore_emulate_tas": "1",
+ "backstore_emulate_tpu": "0",
+ "backstore_emulate_tpws": "0",
+ "backstore_emulate_ua_intlck_ctrl": "0",
+ "backstore_emulate_write_cache": "0",
+ "backstore_enforce_pr_isids": "1",
+ "backstore_fabric_max_sectors": "8192",
+ "backstore_hw_block_size": "512",
+ "backstore_hw_max_sectors": "8192",
+ "backstore_hw_pi_prot_type": "0",
+ "backstore_hw_queue_depth": "128",
+ "backstore_is_nonrot": "1",
+ "backstore_max_unmap_block_desc_count": "1",
+ "backstore_max_unmap_lba_count": "8192",
+ "backstore_max_write_same_len": "65535",
+ "backstore_optimal_sectors": "8192",
+ "backstore_pi_prot_format": "0",
+ "backstore_pi_prot_type": "0",
+ "backstore_queue_depth": "128",
+ "backstore_unmap_granularity": "8192",
+ "backstore_unmap_granularity_alignment": "4194304"
+ }
+ ]
+ }
+ ]
+ }
+]
+
+ Description of the options follows:
+
+
+
+ backstore_block_size
+
+
+ Block size of the underlying device.
+
+
+
+
+ backstore_emulate_3pc
+
+
+ If set to 1, enable Third Party Copy.
+
+
+
+
+ backstore_emulate_caw
+
+
+ If set to 1, enable Compare and Write.
+
+
+
+
+ backstore_emulate_dpo
+
+
+ If set to 1, turn on Disable Page Out.
+
+
+
+
+ backstore_emulate_fua_read
+
+
+ If set to 1, enable Force Unit Access read.
+
+
+
+
+ backstore_emulate_fua_write
+
+
+ If set to 1, enable Force Unit Access write.
+
+
+
+
+ backstore_emulate_model_alias
+
+
+ If set to 1, use the back-end device name for the model alias.
+
+
+
+
+ backstore_emulate_rest_reord
+
+
+ If set to 0, the Queue Algorithm Modifier is Restricted Reordering.
+
+
+
+
+ backstore_emulate_tas
+
+
+ If set to 1, enable Task Aborted Status.
+
+
+
+
+ backstore_emulate_tpu
+
+
+ If set to 1, enable Thin Provisioning Unmap.
+
+
+
+
+ backstore_emulate_tpws
+
+
+ If set to 1, enable Thin Provisioning Write Same.
+
+
+
+
+ backstore_emulate_ua_intlck_ctrl
+
+
+ If set to 1, enable Unit Attention Interlock.
+
+
+
+
+ backstore_emulate_write_cache
+
+
+ If set to 1, turn on Write Cache Enable.
+
+
+
+
+ backstore_enforce_pr_isids
+
+
+ If set to 1, enforce persistent reservation ISIDs.
+
+
+
+
+ backstore_fabric_max_sectors
+
+
+ Maximum number of sectors the fabric can transfer at once.
+
+
+
+
+ backstore_hw_block_size
+
+
+ Hardware block size in bytes.
+
+
+
+
+ backstore_hw_max_sectors
+
+
+ Maximum number of sectors the hardware can transfer at once.
+
+
+
+
+ backstore_hw_pi_prot_type
+
+
+ If non-zero, DIF protection is enabled on the underlying hardware.
+
+
+
+
+ backstore_hw_queue_depth
+
+
+ Hardware queue depth.
+
+
+
+
+ backstore_is_nonrot
+
+
+ If set to 1, the backstore is a non-rotational device.
+
+
+
+
+ backstore_max_unmap_block_desc_count
+
+
+ Maximum number of block descriptors for UNMAP.
+
+
+
+
+ "backstore_max_unmap_lba_count":
+
+
+ Maximum number of LBAs for UNMAP.
+
+
+
+
+ backstore_max_write_same_len
+
+
+ Maximum length for WRITE_SAME.
+
+
+
+
+ backstore_optimal_sectors
+
+
+ Optimal request size in sectors.
+
+
+
+
+ backstore_pi_prot_format
+
+
+ DIF protection format.
+
+
+
+
+ backstore_pi_prot_type
+
+
+ DIF protection type.
+
+
+
+
+ backstore_queue_depth
+
+
+ Queue depth.
+
+
+
+
+ backstore_unmap_granularity
+
+
+ UNMAP granularity.
+
+
+
+
+ backstore_unmap_granularity_alignment
+
+
+ UNMAP granularity alignment.
+
+
+
+
+
+ For targets, the attributes allow tuning of kernel
+ parameters. Use with caution.
+
+"targets": [
+{
+ "host": "igw1",
+ "target": "iqn.2003-01.org.linux-iscsi.generic.x86:sn.abcdefghijk",
+ "tpg_login_timeout": "10",
+ "tpg_default_cmdsn_depth": "64",
+ "tpg_default_erl": "0",
+ "tpg_login_timeout": "10",
+ "tpg_netif_timeout": "2",
+ "tpg_prod_mode_write_protect": "0",
+ "tpg_t10_pi": "0"
+}
+
+
+ If a site needs statically assigned LUNs, then assign numbers to each LUN.
+
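+ As a minimal sketch following the attribute syntax shown above (image
+ names and LUN numbers are hypothetical), static LUN numbers can be
+ assigned per image in the "tpg" section:
+
+"tpg": [
+ {
+ "image": "archive",
+ "lun": "0"
+ },
+ {
+ "image": "backup",
+ "lun": "1"
+ }
+]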
+
+
+
+
+ Connecting to lrbd-managed Targets
+
+
+ This chapter describes how to connect to lrbd-managed targets from clients
+ running Linux, Microsoft Windows, or VMware.
+
+
+
+ Linux (open-iscsi)
+
+ Connecting to lrbd-backed iSCSI targets with
+ open-iscsi is a two-step process. First the
+ initiator must discover the iSCSI targets available on the gateway host,
+ then it must log in and map the available Logical Units (LUs).
+
+
+ Both steps require that the open-iscsi daemon is
+ running. The way you start the open-iscsi daemon
+ is dependent on your Linux distribution:
+
+
+
+
+ On SUSE Linux Enterprise Server (SLES) and Red Hat Enterprise Linux (RHEL) hosts, run systemctl start
+ iscsid (or service iscsid start if
+ systemctl is not available).
+
+
+
+
+ On Debian and Ubuntu hosts, run systemctl start
+ open-iscsi (or service open-iscsi start).
+
+
+
+
+ If your initiator host runs SUSE Linux Enterprise Server, refer to
+
+ or
+
+ for details on how to connect to an iSCSI target.
+
+
+ For any other Linux distribution supporting
+ open-iscsi, proceed to discover targets on your
+ lrbd gateway (this example uses iscsi1.example.com
+ as the portal address; for multipath access repeat these steps with
+ iscsi2.example.com):
+
+iscsiadm -m discovery -t sendtargets -p iscsi1.example.com
+192.168.124.104:3260,1 iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol
+
+ Then, log in to the portal. If the login completes successfully, any
+ RBD-backed logical units on the portal will immediately become available on
+ the system SCSI bus:
+
+iscsiadm -m node -p iscsi1.example.com --login
+Logging in to [iface: default, target: iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol, portal: 192.168.124.104,3260] (multiple)
+Login to [iface: default, target: iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol, portal: 192.168.124.104,3260] successful.
+
+ Repeat this process for other portal IP addresses or hosts.
+
+
+ If your system has the lsscsi utility installed,
+ you can use it to enumerate available SCSI devices on your system:
+
+lsscsi
+[8:0:0:0] disk SUSE RBD 4.0 /dev/sde
+[9:0:0:0] disk SUSE RBD 4.0 /dev/sdf
+
+ In a multipath configuration (where two connected iSCSI devices represent
+ one and the same LU), you can also examine the multipath device state with
+ the multipath utility:
+
+multipath -ll
+360014050cf9dcfcb2603933ac3298dca dm-9 SUSE,RBD
+size=49G features='0' hwhandler='0' wp=rw
+|-+- policy='service-time 0' prio=1 status=active
+| `- 8:0:0:0 sde 8:64 active ready running
+`-+- policy='service-time 0' prio=1 status=enabled
+`- 9:0:0:0 sdf 8:80 active ready running
+
+ You can now use this multipath device as you would any block device. For
+ example, you can use the device as a Physical Volume for Linux Logical
+ Volume Management (LVM), or you can simply create a file system on it. The
+ example below demonstrates how to create an XFS file system on the newly
+ connected multipath iSCSI volume:
+
+mkfs -t xfs /dev/mapper/360014050cf9dcfcb2603933ac3298dca
+log stripe unit (4194304 bytes) is too large (maximum is 256KiB)
+log stripe unit adjusted to 32KiB
+meta-data=/dev/mapper/360014050cf9dcfcb2603933ac3298dca isize=256 agcount=17, agsize=799744 blks
+ = sectsz=512 attr=2, projid32bit=1
+ = crc=0 finobt=0
+data = bsize=4096 blocks=12800000, imaxpct=25
+ = sunit=1024 swidth=1024 blks
+naming =version 2 bsize=4096 ascii-ci=0 ftype=0
+log =internal log bsize=4096 blocks=6256, version=2
+ = sectsz=512 sunit=8 blks, lazy-count=1
+realtime =none extsz=4096 blocks=0, rtextents=0
+
+ Note that since XFS is a non-clustered file system, you may only ever mount it
+ on a single iSCSI initiator node at any given time.
+
+
+ If at any time you want to discontinue using the iSCSI LUs associated with
+ a particular target, run the following command:
+
+iscsiadm -m node -p iscsi1.example.com --logout
+Logging out of session [sid: 18, iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol, portal: 192.168.124.104,3260]
+Logout of [sid: 18, target: iqn.2003-01.org.linux-iscsi.iscsi.x86:testvol, portal: 192.168.124.104,3260] successful.
+
+ As with discovery and login, you must repeat the logout steps for all
+ portal IP addresses or host names.
+
+
+ Multipath Configuration
+
+ The multipath configuration is maintained on the clients or initiators and
+ is independent of any lrbd configuration. Select
+ a strategy prior to using block storage. After editing the
+ /etc/multipath.conf, restart
+ multipathd with
+
+sudo systemctl restart multipathd
+
+ For an active-passive configuration with friendly names, add
+
+defaults {
+ user_friendly_names yes
+}
+
+ to your /etc/multipath.conf. After connecting to your
+ targets successfully, run
+
+multipath -ll
+mpathd (36001405dbb561b2b5e439f0aed2f8e1e) dm-0 SUSE,RBD
+size=2.0G features='0' hwhandler='0' wp=rw
+|-+- policy='service-time 0' prio=1 status=active
+| `- 2:0:0:3 sdl 8:176 active ready running
+|-+- policy='service-time 0' prio=1 status=enabled
+| `- 3:0:0:3 sdj 8:144 active ready running
+`-+- policy='service-time 0' prio=1 status=enabled
+ `- 4:0:0:3 sdk 8:160 active ready running
+
+ Note the status of each link. For an active-active configuration, add
+
+defaults {
+ user_friendly_names yes
+}
+
+devices {
+ device {
+ vendor "(LIO-ORG|SUSE)"
+ product "RBD"
+ path_grouping_policy "multibus"
+ path_checker "tur"
+ features "0"
+ hardware_handler "1 alua"
+ prio "alua"
+ failback "immediate"
+ rr_weight "uniform"
+ no_path_retry 12
+ rr_min_io 100
+ }
+}
+
+ to your /etc/multipath.conf. Restart
+ multipathd and run
+
+multipath -ll
+mpathd (36001405dbb561b2b5e439f0aed2f8e1e) dm-3 SUSE,RBD
+size=2.0G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
+`-+- policy='service-time 0' prio=50 status=active
+ |- 4:0:0:3 sdj 8:144 active ready running
+ |- 3:0:0:3 sdk 8:160 active ready running
+ `- 2:0:0:3 sdl 8:176 active ready running
+
+
+
+
+ Microsoft Windows (Microsoft iSCSI initiator)
+
+ To connect to a SUSE Enterprise Storage iSCSI target from a Windows 2012 server, follow
+ these steps:
+
+
+
+
+ Open Windows Server Manager. From the Dashboard, select
+ ToolsiSCSI
+ Initiator. The iSCSI Initiator
+ Properties dialog appears. Select the
+ Discovery tab:
+
+
+
+
+
+ In the Discover Target Portal dialog, enter the
+ target's host name or IP address in the Target field
+ and click OK:
+
+
+
+
+
+ Repeat this process for all other gateway host names or IP addresses.
+ When completed, review the Target Portals list:
+
+
+
+
+
+ Next, switch to the Targets tab and review your
+ discovered target(s).
+
+
+
+
+
+ Click Connect in the Targets tab.
+ The Connect To Target dialog appears. Select the
+ Enable Multi-path check box to enable multipath I/O
+ (MPIO), then click OK:
+
+
+
+
+ When the Connect to Target dialog closes, select
+ Properties to review the target's properties:
+
+
+
+
+
+ Select Devices, and click MPIO to
+ review the multipath I/O configuration:
+
+
+
+ The default Load Balance policy is Round
+ Robin With Subset. If you prefer a pure fail-over
+ configuration, change it to Fail Over Only.
+
+
+
+
+ This concludes the iSCSI initiator configuration. The iSCSI volumes are now
+ available like any other SCSI devices, and may be initialized for use as
+ volumes and drives. Click OK to close the iSCSI
+ Initiator Properties dialog, and proceed with the File
+ and Storage Services role from the Server
+ Manager dashboard.
+
+
+ Observe the newly connected volume. It identifies as SUSE RBD
+ SCSI Multi-Path Drive on the iSCSI bus, and is initially marked
+ with an Offline status and a partition table type of
+ Unknown. If the new volume does not appear
+ immediately, select Rescan Storage from the
+ Tasks drop-down box to rescan the iSCSI bus.
+
+
+
+
+ Right-click on the iSCSI volume and select New Volume
+ from the context menu. The New Volume Wizard appears.
+ Click Next, highlight the newly connected iSCSI volume
+ and click Next to begin.
+
+
+
+
+
+ Initially, the device is empty and does not contain a partition table.
+ When prompted, confirm the dialog indicating that the volume will be
+ initialized with a GPT partition table:
+
+
+
+
+
+ Select the volume size. Typically, you would use the device's full
+ capacity. Then assign a drive letter or folder name where the newly
+ created volume will become available, select a file system to create
+ on the new volume, and finally confirm your selections with
+ Create to finish creating the volume:
+
+
+
+ When the process finishes, review the results, then
+ Close to conclude the drive initialization. Once
+ initialization completes, the volume (and its NTFS file system) becomes
+ available like a newly initialized local drive.
+
+
+
+
+
+
+ VMware
+
+
+
+
+ To connect to lrbd-managed iSCSI volumes, you
+ need a configured iSCSI software adapter. If no such adapter is available
+ in your vSphere configuration, create one by selecting
+ ConfigurationStorage
+ Adapters AddiSCSI Software
+ initiator.
+
+
+
+
+ When available, select the adapter's properties by right-clicking the
+ adapter and selecting Properties from the context
+ menu:
+
+
+
+
+
+ In the iSCSI Software Initiator dialog, click the
+ Configure button. Then go to the Dynamic
+ Discovery tab and select Add.
+
+
+
+
+ Enter the IP address or host name of your lrbd
+ iSCSI gateway. If you run multiple iSCSI gateways in a failover
+ configuration, repeat this step for as many gateways as you operate.
+
+
+
+ When you have entered all iSCSI gateways, click OK in
+ the dialog to initiate a rescan of the iSCSI adapter.
+
+
+
+
+ When the rescan completes, the new iSCSI device appears below the
+ Storage Adapters list in the
+ Details pane. For multipath devices, you can now
+ right-click on the adapter and select Manage Paths
+ from the context menu:
+
+
+
+ You should now see all paths with a green light under
+ Status. One of your paths should be marked
+ Active (I/O) and all others simply
+ Active:
+
+
+
+
+
+ You can now switch from Storage Adapters to the item
+ labeled Storage. Select Add
+ Storage... in the top-right corner of the pane to bring up the
+ Add Storage dialog. Then, select
+ Disk/LUN and click Next. The newly
+ added iSCSI device appears in the Select Disk/LUN
+ list. Select it, then click Next to proceed:
+
+
+
+ Click Next to accept the default disk layout.
+
+
+
+
+ In the Properties pane, assign a name to the new
+ datastore, and click Next. Accept the default setting
+ to use the volume's entire space for the datastore, or select
+ Custom Space Setting for a smaller datastore:
+
+
+
+ Click Finish to complete the datastore creation.
+
+
+ The new datastore now appears in the datastore list and you can select it
+ to retrieve details. You are now able to use the
+ lrbd-backed iSCSI volume like any other vSphere
+ datastore.
+
+
+
+
+
+
+
+ Conclusion
+
+
+ lrbd is a key component of SUSE Enterprise Storage that enables
+ access to distributed, highly available block storage from any server or
+ client capable of speaking the iSCSI protocol. By using
+ lrbd on one or more iSCSI gateway hosts, Ceph RBD
+ images become available as Logical Units (LUs) associated with iSCSI
+ targets, which can be accessed in an optionally load-balanced, highly
+ available fashion.
+
+
+
+ Since all of lrbd's configuration is stored in the
+ Ceph RADOS object store, lrbd gateway hosts are
+ inherently without persistent state and thus can be replaced, augmented, or
+ reduced at will. As a result, SUSE Enterprise Storage enables SUSE customers to run a
+ truly distributed, highly-available, resilient, and self-healing enterprise
+ storage technology on commodity hardware and an entirely open source
+ platform.
+
+
+
+
+
+
+ Clustered File System
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ The Ceph file system (CephFS) is a POSIX-compliant file system that uses
+ a Ceph storage cluster to store its data. CephFS uses the same cluster
+ system as Ceph block devices, Ceph object storage with its S3 and Swift
+ APIs, or native bindings (librados).
+
+
+ To use CephFS, you need to have a running Ceph storage cluster, and at
+ least one running Ceph metadata server.
+
+
+
+ CephFS file layout changes can be performed as documented in
+ .
+ However, a data pool must not be added to an existing CephFS file system
+ (via ceph mds add_data_pool) while the file system is
+ mounted by any clients.
+
+
+
+ Ceph Metadata Server
+
+
+ The Ceph metadata server (MDS) stores metadata for CephFS. Ceph block
+ devices and Ceph object storage do not use MDS. MDSs
+ make it possible for POSIX file system users to execute basic
+ commands—such as ls or
+ find—without placing an enormous burden on the
+ Ceph storage cluster.
+
+
+
+ Adding a Metadata Server
+
+ After you deploy OSDs and monitors, you can deploy metadata servers.
+ Although MDS service can share a node with an OSD and/or monitor service,
+ you are encouraged to deploy it on a separate cluster node for performance
+ reasons.
+
+cephadm > ceph-deploy install mds-host-name
+cephadm > ceph-deploy mds create host-name:daemon-name
+
+ You may optionally specify a daemon instance name if you need to run
+ multiple daemons on a single server.
+
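+ For instance, to run an additional MDS instance named mds-a
+ on a host named mds1 (both names are hypothetical), you
+ could run:
+
+cephadm > ceph-deploy mds create mds1:mds-a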
+
+ After you deploy your MDS, allow the Ceph OSD/MDS
+ service in the firewall settings of the server where the MDS is deployed. Start
+ yast, navigate to Security and
+ Users Firewall Allowed
+ Services and in the Service to
+ Allow drop–down menu select Ceph
+ OSD/MDS. If the Ceph MDS node is not allowed full traffic,
+ mounting of a file system fails, even though other operations may work
+ properly.
+
+
+
+
+ Configuring a Metadata Server
+
+ You can fine-tune the MDS behavior by inserting relevant options in the
+ ceph.conf configuration file. For a detailed list of
+ MDS-related configuration options, see
+ .
+
+
+ For a detailed list of MDS journaler configuration options, see
+ .
+
+
+
+
+
+
+ CephFS
+
+
+ When you have a healthy Ceph storage cluster with at least one Ceph
+ metadata server, you may create and mount your Ceph file system. Ensure
+ that your client has network connectivity and a proper authentication
+ keyring.
+
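+ As a quick sanity check before mounting (assuming the client has a
+ ceph.conf and an admin keyring in place), you can verify connectivity and
+ cluster health with:
+
+ceph -s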
+
+
+ Creating CephFS
+
+ A CephFS requires at least two RADOS pools: one for
+ data and one for metadata. When
+ configuring these pools, you might consider:
+
+
+
+
+ Using a higher replication level for the metadata pool, as any data loss
+ in this pool can render the whole file system inaccessible.
+
+
+
+
+ Using lower-latency storage such as SSDs for the metadata pool, as this
+ will improve the observed latency of file system operations on clients.
+
+
+
+
+ For more information on managing pools, see .
+
+
+ To create the two required pools—for example 'cephfs_data' and
+ 'cephfs_metadata'—with default settings for use with CephFS, run
+ the following commands:
+
+ceph osd pool create cephfs_data pg_num
+ceph osd pool create cephfs_metadata pg_num
+
+ When the pools are created, you may enable the file system with the
+ ceph fs new command:
+
+ceph fs new fs_name metadata_pool_name data_pool_name
+
+ For example:
+
+ceph fs new cephfs cephfs_metadata cephfs_data
+
+ You can check that the file system was created by listing all available
+ CephFS file systems:
+
+ceph fs ls
+ name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
+
+ When the file system has been created, your MDS will be able to enter an
+ active state. For example, in a single MDS system:
+
+ceph mds stat
+ e5: 1/1/1 up
+
+
+
+ Mounting CephFS
+
+ Once the file system is created and the MDS is active, you are ready to
+ mount the file system from a client host.
+
+
+ Create a Secret File
+
+ The Ceph cluster runs with authentication turned on by default. You
+ should create a file that stores your secret key (not the keyring itself).
+ To obtain the secret key for a particular user and then create the file,
+ do the following:
+
+
+ Creating a Secret Key
+
+
+ View the key for the particular user in a keyring file:
+
+cat /etc/ceph/ceph.client.admin.keyring
+
+
+
+ Copy the key of the user who will be using the mounted CephFS
+ file system. Usually the key looks similar to the following:
+
+[client.admin]
+ key = AQCj2YpRiAe6CxAA7/ETt7Hcl9IyxyYciVs47w==
+
+
+
+ Create a file with the user name as part of the file name, for example
+ /etc/ceph/admin.secret for the user
+ admin
+
+
+
+
+ Paste the key value into the file created in the previous step.
+
+
+
+
+ Set proper access rights on the file: the user should be the only one
+ who can read it, and others should have no access rights (see the
+ command sketch after this procedure).
+
+
+
+
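+ As a sketch, the steps above can also be performed from the command line,
+ assuming the admin user and that the ceph
+ command line tool is available on the client:
+
+ceph auth get-key client.admin | sudo tee /etc/ceph/admin.secret > /dev/null
+sudo chmod 600 /etc/ceph/admin.secret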
+
+ Mount CephFS with the Kernel Driver
+
+ You can mount CephFS with the mount
+ command. You need to specify the monitor host name or IP address.
+
+
+ Specify Multiple Monitors
+
+ It is a good idea to specify multiple monitors separated by commas on the
+ mount command line in case one monitor happens to be
+ down at the time of mount. Each monitor address takes the form
+ host[:port]. If the port is not specified, it defaults to 6789.
+
+
+
+ Create the mount point on the local host:
+
+sudo mkdir /mnt/cephfs
+
+ Mount the CephFS:
+
+sudo mount -t ceph ceph_mon1:6789:/ /mnt/cephfs
+
+ A subdirectory subdir may be specified if a subset of
+ the file system is to be mounted:
+
+sudo mount -t ceph ceph_mon1:6789:/subdir /mnt/cephfs
+
+ You can specify more than one monitor host in the mount
+ command:
+
+sudo mount -t ceph ceph_mon1,ceph_mon2,ceph_mon3:6789:/ /mnt/cephfs
+
+ CephFS and cephx Authentication
+
+ To mount CephFS with cephx authentication
+ enabled, you need to specify a user name and a secret:
+
+sudo mount -t ceph ceph_mon1:6789:/ /mnt/cephfs \
+ -o name=admin,secret=AQATSKdNGBnwLhAAnNDKnH65FmVKpXZJVasUeQ==
+
+ As the previous command remains in the shell history, a more secure
+ approach is to read the secret from a file:
+
+sudo mount -t ceph ceph_mon1:6789:/ /mnt/cephfs \
+ -o name=admin,secretfile=/etc/ceph/admin.secret
+
+
+
+
+
+ Unmounting CephFS
+
+ To unmount the CephFS, use the umount command:
+
+sudo umount /mnt/cephfs
+
+
+
+ CephFS in /etc/fstab
+
+ To mount CephFS automatically at client start-up, insert the
+ corresponding line into its file system table
+ /etc/fstab:
+
+mon1:6790,mon2:/subdir /mnt/cephfs ceph name=admin,secretfile=/etc/ceph/secret.key,noatime 0 2
+
+
+
+ Managing Failover
+
+
+ If an MDS daemon stops communicating with the monitor, the monitor will wait
+ a configurable number of seconds (15 seconds by default) before
+ marking the daemon as laggy. You can configure one or
+ more 'standby' daemons that will take over during MDS daemon
+ failover.
+
+
+
+ Configuring Standby Daemons
+
+ There are several configuration settings that control how a daemon will
+ behave while in standby. You can specify them in the
+ ceph.conf on the host where the MDS daemon runs. The
+ daemon loads these settings when it starts, and sends them to the monitor.
+
+
+ By default, if none of these settings are used, all MDS daemons which do
+ not hold a rank will be used as 'standbys' for any rank.
+
+
+ The settings which associate a standby daemon with a particular name or
+ rank do not guarantee that the daemon will only be used for that rank. They
+ mean that when several standbys are available, the associated standby
+ daemon will be used. If a rank fails and a standby is available, it
+ will be used even if it is associated with a different rank or named
+ daemon.
+
+
+
+ mds_standby_replay
+
+
+ If set to true, then the standby daemon will continuously read the
+ metadata journal of an up rank. This will give it a warm metadata cache,
+ and speed up the process of failing over if the daemon serving the rank
+ fails.
+
+
+ An up rank may only have one standby replay daemon assigned to it. If
+ two daemons are both set to be standby replay then one of them will
+ arbitrarily win, and the other will become a normal non-replay standby.
+
+
+ Once a daemon has entered the standby replay state, it will only be used
+ as a standby for the rank that it is following. If another rank fails,
+ this standby replay daemon will not be used as a replacement, even if no
+ other standbys are available.
+
+
+
+
+ mds_standby_for_name
+
+
+ Set this to make the standby daemon only take over a failed rank if the
+ last daemon to hold it matches this name.
+
+
+
+
+ mds_standby_for_rank
+
+
+ Set this to make the standby daemon only take over the specified rank.
+ If another rank fails, this daemon will not be used to replace it.
+
+
+ Use in conjunction with mds_standby_for_fscid to be
+ specific about which file system's rank you are targeting in case of
+ multiple file systems.
+
+
+
+
+ mds_standby_for_fscid
+
+
+ If mds_standby_for_rank is set, this is simply a
+ qualifier to say which file system's rank is referred to.
+
+
+ If mds_standby_for_rank is not set, then setting FSCID
+ will cause this daemon to target any rank in the specified FSCID. Use
+ this if you have a daemon that you want to use for any rank, but only
+ within a particular file system.
+
+
+
+
+ mon_force_standby_active
+
+
+ This setting is used on monitor hosts. It defaults to true.
+
+
+ If it is false, then daemons configured with
+ mds_standby_replay will only become active if the
+ rank/name that they have been configured to follow fails. On the other
+ hand, if this setting is true, then a daemon configured with
+ mds_standby_replay may be assigned some other rank.
+
+
+
+
+
+
+
+ Examples
+
+ Several example ceph.conf configurations follow. You
+ can either copy a ceph.conf with the configuration of
+ all daemons to all your servers, or you can have a different file on each
+ server that contains just that server's daemons configuration.
+
+
+ Simple Pair
+
+ Two MDS daemons 'a' and 'b' acting as a pair. Whichever one is not
+ currently assigned a rank will be the standby replay follower of the
+ other.
+
+[mds.a]
+mds standby replay = true
+mds standby for rank = 0
+
+[mds.b]
+mds standby replay = true
+mds standby for rank = 0
+
+
+
+
+
+
+
+ Managing Cluster with GUI Tools
+
+ openATTIC
+
+ openATTIC is a central storage management system with support for Ceph
+ storage clusters. With openATTIC you can control everything from a central
+ management interface. It is no longer necessary to be familiar with the
+ inner workings of the Ceph storage tools. Cluster management tasks can be
+ carried out either via openATTIC's intuitive Web interface or via its REST API.
+
+
+ Installing Required Packages
+
+
+ While you can install and run openATTIC on any existing Ceph cluster node, we
+ recommend installing it on the admin node. openATTIC is included in the SUSE Enterprise Storage
+ extension. To install the required packages, run
+
+
+sudo zypper in openattic
+
+
+
+ openATTIC will work correctly only if it is the only Web-based application on
+ the specific host. Do not share the host with another Web application such
+ as Calamari.
+
+
+
+
+ openATTIC Initial Setup
+
+
+ After the packages are installed, run the actual openATTIC setup:
+
+
+sudo oaconfig install
+
+
+ oaconfig install will start a number of services,
+ initialize the openATTIC database, and scan the system for pools and volumes to
+ include.
+
+
+
+ By default, oaconfig creates an administrative user
+ account openattic, with the same password as the
+ user name. As a security precaution, we strongly recommend changing this
+ password immediately:
+
+
+sudo oaconfig changepassword openattic
+Changing password for user 'openattic'
+Password: <enter password>
+Password (again): <re-enter password>
+Password changed successfully for user 'openattic'
+
+
+
+ Now your openATTIC storage system can be managed by the Web user interface.
+
+
+
+ openATTIC Web User Interface
+
+
+ openATTIC can be managed using a Web user interface. Open a Web browser and
+ navigate to http://www.example.org/openattic. To log in, use the default
+ user name openattic and the corresponding password.
+
+
+
+
+
+ The openATTIC user interface is graphically divided into a top menu pane and a
+ content pane.
+
+
+
+ The right part of the top pane includes a link to the current user settings,
+ and a Logout link. The rest of the top pane includes the
+ main openATTIC menu.
+
+
+
+ The content pane changes depending on which menu item is selected. By
+ default, a Dashboard is displayed showing general Ceph
+ cluster statistics.
+
+
+
+
+
+ Dashboard
+
+
+ Dashboard shows the overall statistics of the running
+ Ceph cluster. By default it shows the following widgets: Ceph
+ Status, Utilization, OSD
+ Status, and Throughput.
+
+
+
+ The Ceph Status widget tells whether the cluster is
+ operating correctly. In case a problem is detected, you can view the
+ detailed error message by clicking the subtitle inside the widget.
+
+
+
+
+
+ The OSD Status widget shows the total number of OSD nodes
+ and the number of online OSD nodes in the cluster over time.
+
+
+
+
+
+ The Utilization widget shows the storage usage over time.
+ You can activate or deactivate the following charts:
+
+
+
+
+
+ Bytes total - shows the total storage size.
+
+
+
+
+ Bytes available - shows the remaining available space.
+
+
+
+
+ Bytes used - shows the occupied space.
+
+
+
+
+
+
+
+ The Throughput widget shows the read/write
+ throughput per second over time.
+
+
+
+
+
+ More Details on Mouse Over
+
+ If you move the mouse pointer over any of the displayed charts, it will
+ show more details for the date and time under the pointer in a pop-up
+ window.
+
+
+
+
+ Ceph Related Tasks
+
+
+ openATTIC's main menu lists Ceph-related tasks. Currently, the following tasks
+ are relevant: OSDs, RBDs,
+ Pools, Nodes and CRUSH
+ Map.
+
+
+
+ Common Web UI Features
+
+ In openATTIC you often work with lists—for example
+ lists of pools, OSD nodes, or RBD devices. The following common widgets
+ help you manage or adjust these lists:
+
+
+ Click
+
+
+
+
+
+
+ to refresh the list of items.
+
+
+ Click
+
+
+
+
+
+
+ to display or hide individual table columns.
+
+
+ Click
+
+
+
+
+
+
+ and select how many rows to display on a single page.
+
+
+ Click inside
+
+
+
+
+
+
+ and filter the rows by typing the string to search
+ for.
+
+
+ Use
+
+
+
+
+
+
+ to change the currently displayed page if the list
+ spans across multiple pages.
+
+
+
+
+ Listing OSD Nodes
+
+ To list all available OSD nodes, click OSDs from the
+ main menu.
+
+
+ The list shows each OSD's name, host name, status, and weight.
+
+
+
+
+
+ Managing RADOS Block Devices (RBDs)
+
+ To list all available RADOS block devices, click RBDs
+ from the main menu.
+
+
+ The list shows each device's name, the related pool name, the size of the
+ device, and the percentage of space already occupied.
+
+
+
+ To view more detailed information about a device, activate its check box in
+ the very left column:
+
+
+
+ Deleting RBDs
+
+ To delete a device or a group of devices, activate their check boxes in
+ the very left column and click Delete in the top left
+ of the RBDs table:
+
+
+
+
+ Adding RBDs
+
+ To add a new device, click Add in the top left of the
+ RBDs table and do the following on the Create RBD
+ screen:
+
+
+
+
+
+ Enter the name of the new device. Refer to
+ for naming limitations.
+
+
+
+
+ Select the cluster that will store the new device.
+
+
+
+
+ Select the pool from which the new RBD device will be created.
+
+
+
+
+ Specify the size of the new device. If you click the use
+ max link above, the maximum pool size is populated.
+
+
+
+
+ To fine tune the device parameters, click Expert
+ settings and activate or deactivate displayed options.
+
+
+
+
+ Confirm with Create.
+
+
+
+
+
+
+
+ Managing Pools
+
+ More Information on Pools
+
+ For more general information about Ceph pools, refer to
+ . For information specific to erasure coded
+ pools, refer to .
+
+
+
+ To list all available pools, click Pools from the main
+ menu.
+
+
+ The list shows each pool's name, ID, the percentage of used space, the
+ number of placement groups, replica size, type ('replicated' or 'erasure'),
+ erasure code profile, and the CRUSH ruleset.
+
+
+
+ To view more detailed information about a pool, activate its check box in
+ the very left column:
+
+
+
+ Deleting Pools
+
+ To delete a pool or a group of pools, activate their check boxes in the
+ very left column and click Delete in the top left of
+ the pools table:
+
+
+
+
+ Adding Pools
+
+ To add a new pool, click Add in the top left of the
+ pools table and do the following on the Create Ceph
+ pool screen:
+
+
+
+
+
+ Enter the name of the new pool. Refer to
+ for naming limitations.
+
+
+
+
+ Select the cluster that will store the new pool.
+
+
+
+
+ Select the pool type. Pools can be either replicated or erasure coded.
+
+
+
+
+ Specify the number of the pool's placement groups.
+
+
+
+
+ For a replicated pool, specify the replica size.
+
+
+
+
+ Confirm with Create.
+
+
+
+
+
+
+
+ Listing Nodes
+
+ Salt Only Deployment
+
+ The Nodes tab is only available when the cluster is
+ deployed via Salt. Refer to
+ for more information on
+ Salt.
+
+
+
+ Click Nodes from the main menu to view the list of
+ nodes available on the cluster.
+
+
+
+ Each node is represented by its host name, public IP address, cluster ID
+ it belongs to, node role (for example 'admin', 'storage', or 'master'), and
+ key acceptance status.
+
+
+
+
+ Viewing the Cluster CRUSH Map
+
+ Click CRUSH Map from the main menu to view the cluster
+ CRUSH Map.
+
+
+
+ In the Physical setup pane, you can see the structure of
+ the cluster as described by the CRUSH Map.
+
+
+ In the Replication rules pane, you can view individual
+ rulesets after selecting one of them from the Content
+ drop-down box.
+
+
+
+
+
+
+ Calamari
+
+ Calamari is a management and monitoring system for Ceph storage clusters. It
+ provides a Web user interface that makes Ceph cluster monitoring
+ simple and convenient.
+
+
+ The Calamari installation procedure differs depending on the deployment
+ method used. If you deployed Ceph by using
+ ceph-deploy, refer to
+ . If you deployed your
+ cluster by using Crowbar, refer to
+ .
+
+
+ Installing Calamari with ceph-deploy
+
+
+ To install Calamari, do the following:
+
+
+
+
+
+ Install the client part of Calamari:
+
+sudo zypper in romana
+
+
+
+ Initialize the Calamari installation. You will be asked for a superuser user
+ name and password. These will be needed when logging in to the Web
+ interface after the setup is complete.
+
+sudo calamari-ctl initialize
+[INFO] Loading configuration..
+[INFO] Starting/enabling salt...
+[INFO] Starting/enabling postgres...
+[INFO] Initializing database...
+[INFO] Initializing web interface...
+[INFO] You will now be prompted for login details for the administrative user
+account. This is the account you will use to log into the web interface once
+setup is complete.
+Username (leave blank to use 'root'):
+Email address:
+Password:
+Password (again):
+Superuser created successfully.
+[INFO] Starting/enabling services...
+[INFO] Restarting services...
+[INFO] Complete.
+
+
+
+ Check the firewall status
+
+sudo /sbin/SuSEfirewall2 status
+
+ and if it is off, check its configuration and turn it on with
+
+sudo /sbin/SuSEfirewall2 on
+
+ You can find detailed information in
+ .
+
+
+
+
+
+ In order for Calamari to work correctly, the admin keyring needs to be
+ installed on each monitor node:
+
+cephadm > ceph-deploy admin mon1 mon2 mon3
+
+ where mon1, mon2,
+ or mon3 are the host names of the monitors.
+
+
+
+ Now open your Web browser and point it to the host name/IP address of the
+ server where you installed Calamari. Log in with the credentials you
+ entered when installing the Calamari client. A welcome screen appears,
+ instructing you to enter the ceph-deploy calamari
+ connect command. Switch to the terminal on the Calamari host and
+ enter the following command. Note that the --master
+ option specifies the host name of the Calamari server to which all the
+ cluster nodes connect:
+
+cephadm > ceph-deploy calamari connect --master master_host node1 node2 ...
+
+ After the command finishes successfully, reload the Web browser. Now
+ you can monitor your Ceph cluster, OSDs, pools, and more.
+
+
+ Empty Usage Graphs
+
+ If, after having installed Calamari initially, the usage graphs are empty/blank,
+ it is possible that the diamond metrics collector was not automatically
+ installed. To fix this, run salt '*' state.highstate on
+ the Calamari host.
+
+
+
+
+ The Calamari dashboard screen shows the current status of the cluster.
+ This updates regularly, so any change to the cluster state—for
+ example if a node goes offline—should be reflected in Calamari
+ within a few seconds. The Health panel includes a
+ timer to indicate how long it has been since Calamari last saw heartbeat
+ information from the cluster. Normally, this will not be more than one
+ minute old, but in certain failure cases, for example when a network
+ outage occurs or if the cluster loses quorum (that is if more than half
+ of the monitor nodes are down), Calamari will no longer be able to
+ determine cluster state. In this case, the Health
+ panel will indicate that the last update was more than one minute ago.
+ If too much time passes without updates, Calamari displays a warning at the
+ top of the screen: "Cluster Updates Are Stale. The Cluster is not updating
+ Calamari." If this occurs, the other status information Calamari presents
+ will not be correct, so you should investigate further and check the status
+ of your storage nodes and network.
+
+
+
+ Salt Installed by Default with Calamari
+
+ Even if you deployed your Ceph cluster by using
+ ceph-deploy, Salt is installed along with Calamari.
+ The salt command is therefore available even though
+ you did not install Salt manually.
+
+
+
+
+ There may be leftovers of a previous Calamari setup on the system. If,
+ after logging in to the Calamari application, some nodes are already
+ joined or registered, run the following on the Calamari host to trigger a
+ re-run of Salt on all Ceph nodes, which should clear up any odd state
+ or missing pieces:
+
+salt '*' state.highstate
+
+ We also recommend removing files from the previous Calamari setup, such
+ as state files, configuration files, and PostgreSQL database files. At
+ minimum, remove the files in the following directories:
+
+
+
+
+ /etc/calamari/
+
+
+
+
+ /etc/salt/
+
+
+
+
+ /etc/graphite/
+
+
+
+
+ /var/*/salt/
+
+
+
+
+ /var/lib/graphite/
+
+
+
+
+ /var/lib/pgsql/
+
+
+
+
+
+
+
+
+ Installing Calamari Using Crowbar
+
+
+ Conflicts in Combination with Deployment using Crowbar
+
+ If you used Crowbar to install SUSE Enterprise Storage, install Calamari on a different
+ server than the Crowbar server, because Crowbar uses the same port as Calamari (port 80).
+
+
+
+
+ Use the Crowbar UI to deploy Calamari as described in
+ .
+
+
+
+
+
+ Integration with Virtualization Tools
+
+ Using libvirt with Ceph
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ The libvirt library creates a virtual machine abstraction layer between
+ hypervisor interfaces and the software applications that use them. With
+ libvirt, developers and system administrators can focus on a common
+ management framework, common API, and common shell interface
+ (virsh) to many different hypervisors, including
+ QEMU/KVM, Xen, LXC, and VirtualBox.
+
+
+ Ceph block devices support QEMU/KVM. You can use Ceph block devices
+ with software that interfaces with libvirt. The cloud solution uses
+ libvirt to interact with QEMU/KVM, and QEMU/KVM interacts with Ceph
+ block devices via librbd.
+
+
+ To create VMs that use Ceph block devices, use the procedures in the
+ following sections. In the examples, we have used
+ libvirt-pool for the pool name,
+ client.libvirt for the user name, and
+ new-libvirt-image for the image name. You may use any
+ value you like, but ensure you replace those values when executing commands
+ in the subsequent procedures.
+
+
+ Configuring Ceph
+
+
+ To configure Ceph for use with libvirt, perform the following steps:
+
+
+
+
+
+ Create a pool. The following example uses the pool name
+ libvirt-pool with 128 placement groups.
+
+ceph osd pool create libvirt-pool 128 128
+
+ Verify that the pool exists.
+
+ceph osd lspools
+
+
+
+ Create a Ceph User. The following example uses the Ceph user name
+ client.libvirt and references
+ libvirt-pool.
+
+ceph auth get-or-create client.libvirt mon 'allow r' osd \
+ 'allow class-read object_prefix rbd_children, allow rwx pool=libvirt-pool'
+
+ Verify the name exists.
+
+ceph auth list
+
+
+ libvirt will access Ceph using the ID libvirt, not
+ the Ceph name client.libvirt. See
+
+ for a detailed explanation of the difference between ID and name.
+
+
+
+
+
+ Use QEMU to create an image in your RBD pool. The following example uses
+ the image name new-libvirt-image and references
+ libvirt-pool.
+
+qemu-img create -f rbd rbd:libvirt-pool/new-libvirt-image 2G
+
+ Verify the image exists.
+
+rbd -p libvirt-pool ls
+
+
+
+
+ Preparing the VM Manager
+
+
+ You may use libvirt without a VM manager, but you may find it simpler to
+ create your first domain with virt-manager.
+
+
+
+
+
+ Install a virtual machine manager.
+
+sudo zypper in virt-manager
+
+
+
+ Prepare/download an OS image of the system you want to run virtualized.
+
+
+
+
+ Launch the virtual machine manager.
+
+virt-manager
+
+
+
+
+ Creating a VM
+
+
+ To create a VM with virt-manager, perform the following
+ steps:
+
+
+
+
+
+ Choose the connection from the list, right-click it, and select
+ New.
+
+
+
+
+ Import existing disk image by providing the path to the
+ existing storage. Specify the OS type and memory settings, and
+ name the virtual machine, for example
+ libvirt-virtual-machine.
+
+
+
+
+ Finish the configuration and start the VM.
+
+
+
+
+ Verify that the newly created domain exists with sudo virsh
+ list. If needed, specify the connection string, such as
+
+virsh -c qemu+ssh://root@vm_host_hostname/system list
+Id Name State
+-----------------------------------------------
+[...]
+ 9 libvirt-virtual-machine running
+
+
+
+ Log in to the VM and stop it before configuring it for use with Ceph.
+
+
+
+
+
+ Configuring the VM
+
+
+ When configuring the VM for use with Ceph, it is important to use
+ virsh where appropriate. Additionally,
+ virsh commands often require root privileges
+ (sudo); without them, they will not return appropriate
+ results or notify you that root privileges are required. For a reference of
+ virsh commands, refer to
+ Virsh Command
+ Reference.
+
+
+
+
+
+ Open the configuration file with virsh edit
+ vm-domain-name.
+
+sudo virsh edit libvirt-virtual-machine
+
+
+
+ Under <devices> there should be a <disk> entry.
+
+<devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/path/to/image/recent-linux.img'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='drive' controller='0' bus='0' unit='0'/>
+ </disk>
+
+ Replace /path/to/image/recent-linux.img with the path
+ to the OS image.
+
+
+
+ Use sudo virsh edit instead of a text editor. If you
+ edit the configuration file under /etc/libvirt/qemu
+ with a text editor, libvirt may not recognize the change. If there is a
+ discrepancy between the contents of the XML file under
+ /etc/libvirt/qemu and the result of sudo
+ virsh dumpxml vm-domain-name, then
+ your VM may not work properly.
+
+
+
+
+
+ Add the Ceph RBD image you previously created as a <disk> entry.
+
+<disk type='network' device='disk'>
+ <source protocol='rbd' name='libvirt-pool/new-libvirt-image'>
+ <host name='monitor-host' port='6789'/>
+ </source>
+ <target dev='vda' bus='virtio'/>
+</disk>
+
+ Replace monitor-host with the name of your
+ host, and replace the pool and/or image name as necessary. You may add
+ multiple <host> entries for your Ceph monitors. The
+ dev attribute is the logical device name that will
+ appear under the /dev directory of your VM. The
+ optional bus attribute indicates the type of disk device to emulate. The
+ valid settings are driver specific (for example ide, scsi, virtio, xen,
+ usb or sata). See
+ Disks
+ for details of the <disk> element, and its child elements and
+ attributes.
+
+
+
+
+ Save the file.
+
+
+
+
+ If your Ceph cluster has authentication enabled (it does by default),
+ you must generate a secret.
+
+cat > secret.xml <<EOF
+<secret ephemeral='no' private='no'>
+ <usage type='ceph'>
+ <name>client.libvirt secret</name>
+ </usage>
+</secret>
+EOF
+
+
+
+ Define the secret.
+
+sudo virsh secret-define --file secret.xml
+<uuid of secret is output here>
+
+
+
+ Get the client.libvirt key and save the key string to a
+ file.
+
+ceph auth get-key client.libvirt | sudo tee client.libvirt.key
+
+
+
+ Set the UUID of the secret.
+
+sudo virsh secret-set-value --secret uuid of secret \
+--base64 $(cat client.libvirt.key) && rm client.libvirt.key secret.xml
+
+ You must also set the secret manually by adding the following
+ <auth> entry to the
+ <disk> element you entered earlier (replacing
+ the uuid value with the result from the command line example above).
+
+sudo virsh edit libvirt-virtual-machine
+
+ Then, add the <auth></auth> element to the
+ domain configuration file:
+
+...
+</source>
+<auth username='libvirt'>
+  <secret type='ceph' uuid='9ec59067-fdbc-a6c0-03ff-df165c0587b8'/>
+</auth>
+<target ...
+
+
+ The example ID is libvirt, not the Ceph name
+ client.libvirt as generated at step 2 of
+ . Ensure you use the ID component
+ of the Ceph name you generated. If for some reason you need to regenerate
+ the secret, you will need to execute sudo virsh
+ secret-undefine uuid before
+ executing sudo virsh secret-set-value again.
+
+
+
+
+
+
+ Summary
+
+
+ Once you have configured the VM for use with Ceph, you can start the VM.
+ To verify that the VM and Ceph are communicating, you may perform the
+ following procedures.
+
+
+
+
+
+ Check to see if Ceph is running:
+
+ceph health
+
+
+
+ Check to see if the VM is running:
+
+sudo virsh list
+
+
+
+ Check to see if the VM is communicating with Ceph. Replace
+ vm-domain-name with the name of your VM domain:
+
+sudo virsh qemu-monitor-command --hmp vm-domain-name 'info block'
+
+
+
+ Check to see if the device from <target dev='hdb'
+ bus='ide'/> appears under /dev or under
+ /proc/partitions:
+
+ls /dev
+cat /proc/partitions
+
+
+
+
+
+ Ceph as a Back-end for QEMU KVM Instance
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ The most frequent Ceph use case involves providing block device images to
+ virtual machines. For example, a user may create a 'golden' image with an OS
+ and any relevant software in an ideal configuration. Then, the user takes a
+ snapshot of the image. Finally, the user clones the snapshot (usually many
+ times, see for details). The ability to
+ make copy-on-write clones of a snapshot means that Ceph can provision block
+ device images to virtual machines quickly, because the client does not need
+ to download an entire image each time it spins up a new virtual machine.
+
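+
+ As a rough command-line illustration of this golden-image workflow (the
+ pool, image, and snapshot names below are placeholders, and the image must
+ use format 2 to support cloning):
+
+rbd snap create pool1/golden-image@golden-snap
+rbd snap protect pool1/golden-image@golden-snap
+rbd clone pool1/golden-image@golden-snap pool1/vm-disk-1
+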
+
+ Ceph block devices can integrate with the QEMU virtual machines. For more
+ information on QEMU KVM, see
+ .
+
+
+ Installation
+
+
+ In order to use Ceph block devices, QEMU needs to have the appropriate
+ driver installed. Check whether the qemu-block-rbd
+ package is installed, and install it if needed:
+
+
+sudo zypper install qemu-block-rbd
+
+
+ Usage
+
+
+ The QEMU command line expects you to specify the pool name and image name.
+ You may also specify a snapshot name.
+
+
+qemu-img command options \
+rbd:pool-name/image-name@snapshot-name:option1=value1:option2=value2...
+
+
+ For example, specifying the id and
+ conf options might look like the following:
+
+
+qemu-img command options \
+rbd:pool_name/image_name:id=user_id:conf=/etc/ceph/ceph.conf
+
+
+ Creating Images with QEMU
+
+
+ You can create a block device image from QEMU. You must specify
+ rbd, the pool name, and the name of the image you want to
+ create. You must also specify the size of the image.
+
+
+qemu-img create -f raw rbd:pool-name/image-name size
+
+
+ For example:
+
+
+qemu-img create -f raw rbd:pool1/image1 10G
+Formatting 'rbd:pool1/image1', fmt=raw size=10737418240 nocow=off cluster_size=0
+
+
+
+ The raw data format is really the only sensible format
+ option to use with RBD. Technically, you could use other QEMU-supported
+ formats such as qcow2, but doing so would add additional
+ overhead, and would also render the volume unsafe for virtual machine live
+ migration when caching is enabled.
+
+
+
+
+ Resizing Images with QEMU
+
+
+ You can resize a block device image from QEMU. You must specify
+ rbd, the pool name, and the name of the image you want to
+ resize. You must also specify the size of the image.
+
+
+qemu-img resize rbd:pool-name/image-name size
+
+
+ For example:
+
+
+qemu-img resize rbd:pool1/image1 9G
+Image resized.
+
+
+ Retrieving Image Info with QEMU
+
+
+ You can retrieve block device image information from QEMU. You must
+ specify rbd, the pool name, and the name of the image.
+
+
+qemu-img info rbd:pool-name/image-name
+
+
+ For example:
+
+
+qemu-img info rbd:pool1/image1
+image: rbd:pool1/image1
+file format: raw
+virtual size: 9.0G (9663676416 bytes)
+disk size: unavailable
+cluster_size: 4194304
+
+
+ Running QEMU with RBD
+
+
+ QEMU can access an image as a virtual block device directly via
+ librbd. This avoids an additional context switch,
+ and can take advantage of RBD caching.
+
+
+
+ You can use qemu-img to convert existing virtual machine
+ images to Ceph block device images. For example, if you have a qcow2
+ image, you could run:
+
+
+qemu-img convert -f qcow2 -O raw sles12.qcow2 rbd:pool1/sles12
+
+
+ To run a virtual machine booting from that image, you could run:
+
+
+qemu -m 1024 -drive format=raw,file=rbd:pool1/sles12
+
+
+ RBD
+ caching can significantly improve performance. QEMU’s cache
+ options control librbd caching:
+
+
+qemu -m 1024 -drive format=rbd,file=rbd:pool1/sles12,cache=writeback
+
+
+ Enabling Discard/TRIM
+
+
+ Ceph block devices support the discard operation. This means that a guest
+ can send TRIM requests to let a Ceph block device reclaim unused space.
+ This can be enabled in the guest by mounting XFS
+ with the discard option.
+
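+
+ For example, assuming an XFS file system on a placeholder device inside the
+ guest, discard can be enabled at mount time like this:
+
+sudo mount -o discard /dev/vda3 /mnt/data
+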
+
+
+ For this to be available to the guest, it must be explicitly enabled for the
+ block device. To do this, you must specify a discard_granularity
+ value associated with the drive:
+
+
+qemu -m 1024 -drive format=raw,file=rbd:pool1/sles12,id=drive1,if=none \
+-device driver=ide-hd,drive=drive1,discard_granularity=512
+
+
+
+ The above example uses the IDE driver. The virtio driver does not support
+ discard.
+
+
+
+
+ If using libvirt, edit your libvirt domain’s
+ configuration file using virsh edit to include the
+ xmlns:qemu value. Then, add a qemu:commandline
+ block as a child of that domain. The following example shows how
+ to set two devices with qemu id= to different
+ discard_granularity values.
+
+
+
+<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
+ <qemu:commandline>
+ <qemu:arg value='-set'/>
+ <qemu:arg value='block.scsi0-0-0.discard_granularity=4096'/>
+ <qemu:arg value='-set'/>
+ <qemu:arg value='block.scsi0-0-1.discard_granularity=65536'/>
+ </qemu:commandline>
+</domain>
+
+
+ QEMU Cache Options
+
+
+ QEMU’s cache options correspond to the following Ceph RBD Cache
+ settings.
+
+
+
+ Writeback:
+
+
+rbd_cache = true
+
+
+ Writethrough:
+
+
+rbd_cache = true
+rbd_cache_max_dirty = 0
+
+
+ None:
+
+
+rbd_cache = false
+
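+
+ For example, to run the guest with caching disabled on the QEMU command
+ line (a minimal sketch reusing the earlier placeholder pool and image
+ names):
+
+qemu -m 1024 -drive format=raw,file=rbd:pool1/sles12,cache=none
+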
+
+ QEMU’s cache settings override Ceph’s default settings (settings
+ that are not explicitly set in the Ceph configuration file). If you
+ explicitly set
+ RBD
+ Cache settings in your Ceph configuration file, your Ceph
+ settings override the QEMU cache settings. If you set cache settings on
+ the QEMU command line, the QEMU command line settings override the
+ Ceph configuration file settings.
+
+
+
+
+
+ Best Practices
+
+ Introduction
+
+ This chapter introduces a list of selected topics that you may encounter
+ when managing a Ceph environment. Each topic comes with a recommended
+ solution that helps you understand or fix the problem. The topics
+ are sorted into relevant categories.
+
+
+ Reporting Software Problems
+
+
+ If you come across a problem when running SUSE Enterprise Storage related to any of its
+ components, such as Ceph, RADOS Gateway, or Calamari, report the problem to SUSE
+ Technical Support. The recommended way is to use the
+ supportconfig utility.
+
+
+
+
+ Because supportconfig is modular software, make sure
+ that the supportutils-plugin-ses package is
+ installed.
+
+rpm -q supportutils-plugin-ses
+
+ If it is missing on the Ceph server, install it with
+
+zypper ref && zypper in supportutils-plugin-ses
+
+
+
+ Although you can use supportconfig on the command line,
+ we recommend using the related YaST module. Find more information about
+ supportconfig in
+ .
+
+
+
+
+ Hardware Recommendations
+
+ Can I Reduce Data Replication
+
+
+ Ceph stores data within pools. Pools are logical groups for storing
+ objects. Data objects within a pool are replicated so that they can be
+ recovered when OSDs fail. New pools are created with the default of three
+ replicas. This number includes the 'original' data object itself. Three
+ replicas then mean the data object and two of its copies, for a total of three
+ instances.
+
+
+
+ You can manually change the number of pool replicas (see
+ ). Setting a pool to two
+ replicas means that there is only one copy of the data
+ object besides the object itself. If you lose one object instance, you
+ need to trust that the other copy has not been corrupted (for example since
+ the last
+ scrubbing
+ ) during recovery.
+
+
+
+ Setting a pool to one replica means that there is exactly
+ one instance of the data object in the pool. If the OSD
+ fails, you lose the data. A possible usage for a pool with one replica is
+ storing temporary data for a short time.
+
+
+
+ Setting more than three replicas for a pool means only a small increase in
+ reliability, but may be suitable in rare cases. Remember that the more
+ replicas you set, the more disk space is needed to store the object copies. If you
+ need the ultimate data security, we recommend using erasure coded pools. For
+ more information, see .
+
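+
+ For example, you can check or change the replica count of an existing pool
+ from the command line (example-pool is a placeholder name):
+
+ceph osd pool get example-pool size
+ceph osd pool set example-pool size 3
+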
+
+
+
+ We strongly encourage you to either leave the number of replicas for a pool
+ at the default value of 3, or use a higher value if suitable. Setting the
+ number of replicas to a lower value is dangerous and may cause the loss
+ of data stored in the cluster.
+
+
+
+
+ Can I Reduce Redundancy Similar to RAID 6 Arrays?
+
+
+ When creating a new pool, Ceph uses the replica type by default, which
+ replicates objects across multiple disks to be able to recover from an OSD
+ failure. While this type of pool is safe, it uses a lot of disk space to
+ store objects.
+
+
+
+ To reduce the disk space needed, Ceph implements erasure
+ coded pools. This method adds extra coding chunks to the data so that
+ lost or corrupted chunks can be recovered. Erasure coded pools offer performance,
+ reliability, and storage savings similar to RAID 6 arrays.
+
+
+
+ As erasure coding is a complex topic, you need to study it properly to be
+ able to deploy it for optimum performance. For more information, see
+ .
+
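+
+ As a quick illustration (the pool name and placement group counts are
+ placeholders), an erasure coded pool using the default erasure code profile
+ can be created like this:
+
+ceph osd pool create ec-example-pool 128 128 erasure
+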
+
+
+ What is the Minimum Disk Size for an OSD node?
+
+
+ There are two types of disk space needed to run an OSD: the space for the
+ disk journal, and the space for the stored data. The minimum (and default)
+ value for the journal is 6GB. The minimum space for data is 5GB, as
+ partitions smaller than 5GB are automatically assigned a weight of 0.
+
+
+
+ So although the minimum disk space for an OSD is 11GB, we do not recommend a
+ disk smaller than 20GB, even for testing purposes.
+
+
+
+ How Much RAM Do I Need in a Storage Server?
+
+
+ The recommended minimum is 2GB per OSD. Note that during recovery, 1 or even
+ 2GB of RAM per terabyte of OSD disk space is optimal.
+
+
+
+ OSD and Monitor Sharing One Server
+
+
+ Although it is technically possible to run OSDs and monitor nodes on the
+ same server in test environments, we strongly recommend having a separate
+ server for each monitor node in production. The main reason is
+ performance—the more OSDs the cluster has, the more I/O operations the
+ monitor nodes need to perform. And when one server is shared between a
+ monitor node and OSD(s), the OSD I/O operations are a limiting factor for
+ the monitor node.
+
+
+
+ Another aspect is whether to share disks between an OSD, a monitor node, and
+ the operating system on the server. The answer is simple: if possible,
+ dedicate a separate disk to OSD, and a separate server to a monitor node.
+
+
+
+ Although Ceph supports directory-based OSDs, an OSD should always have a
+ dedicated disk other than the operating system one.
+
+
+
+
+ If it is really necessary to run OSD and monitor node
+ on the same server, run the monitor on a separate disk by mounting the disk
+ to the /var/lib/ceph/mon directory for slightly better
+ performance.
+
+
+
+
+ How Many Disks Can I Have in a Server
+
+
+ You can have as many disks in one server as the server allows. There are a few
+ things to consider when planning the number of disks per server:
+
+
+
+
+
+ Network bandwidth. The more disks you have in a
+ server, the more data must be transferred via the network card(s) for the
+ disk write operations.
+
+
+
+
+ Memory. For optimum performance, reserve at least 2GB
+ of RAM per terabyte of disk space installed.
+
+
+
+
+ Fault tolerance. If the complete server fails, the
+ more disks it has, the more OSDs the cluster temporarily loses. Moreover,
+ to keep the replication rules running, you need to copy all the data from
+ the failed server between the other nodes in the cluster.
+
+
+
+
+
+ How Many OSDs Can Share a Single SSD Journal
+
+
+ Solid-state drives (SSD) have no moving parts. This reduces random access
+ time and read latency while accelerating data throughput. Because their
+ price per megabyte is significantly higher than the price of spinning hard disks,
+ SSDs are only suitable for smaller amounts of storage.
+
+
+
+ OSDs may see a significant performance improvement by storing their journal
+ on an SSD and the object data on a separate hard disk. The osd journal configuration setting defaults to
+ /var/lib/ceph/osd/cluster-id/journal.
+ You can mount this path to an SSD or to an SSD partition so that it is not
+ merely a file on the same disk as the object data.
+
+
+
+ Sharing an SSD for Multiple Journals
+
+ As journal data occupies relatively little space, you can mount several
+ journal directories to a single SSD disk. Keep in mind that with each
+ shared journal, the performance of the SSD disk degrades. We do not
+ recommend sharing more than 6 journals on the same SSD disk.
+
+
+
+
+
+ Cluster Administration
+
+ This chapter describes some useful operations that can be performed after the
+ cluster is completely deployed and running, such as adding nodes or disks.
+
+
+ Using ceph-deploy on an Already Setup Server
+
+
+ ceph-deploy is a command line utility to easily deploy a
+ Ceph cluster (see ). After the
+ cluster is deployed, you can use ceph-deploy to
+ administer the cluster's nodes. You can add OSD nodes, monitor nodes, gather
+ authentication keys, or purge a running cluster.
+ ceph-deploy has the following general syntax:
+
+
+ceph-deploy subcommands options
+
+
+ A list of selected ceph-deploy subcommands with short
+ descriptions follows.
+
+
+
+
+ Administer Ceph nodes with ceph-deploy from the admin
+ node. Before administering them, always create a new temporary directory
+ and cd into it. Then choose one monitor node, gather
+ the authentication keys from it with the gatherkeys subcommand,
+ and copy the /etc/ceph/ceph.conf file from
+ the monitor node into the current local directory.
+
+cephadm > mkdir ceph_tmp
+cephadm > cd ceph_tmp
+cephadm > ceph-deploy gatherkeys ceph_mon_host
+cephadm > scp ceph_mon_host:/etc/ceph/ceph.conf .
+
+
+
+
+ gatherkeys
+
+
+ Gather authentication keys for provisioning new nodes. It takes host
+ names as arguments. It checks for and fetches the client.admin
+ keyring, the monitor keyring, and the
+ bootstrap-mds/bootstrap-osd keyrings from the monitor host.
+ These authentication keys are used when new
+ monitors/OSDs/MDS are added to the cluster.
+
+
+ Usage:
+
+ceph-deploy gatherkeys hostname
+
+ hostname is the host name of the monitor from
+ where keys are to be pulled.
+
+
+
+
+ mon add
+
+
+ Adds a monitor to an existing cluster. It first detects the platform and
+ distribution for the target host, and checks if the host name is
+ compatible for deployment. It then uses the monitor keyring, ensures the
+ configuration for the new monitor host, and adds the monitor to the cluster.
+ If the section for the monitor exists, it can define the monitor address
+ by the mon addr option; otherwise it falls back to
+ resolving the host name to an IP. If is used,
+ it will override all other options. After adding the monitor to the
+ cluster, it gives it some time to start. It then looks for any monitor
+ errors, and checks the monitor status. Monitor errors arise if the monitor is
+ not added in the option, if it does
+ not exist in , or if neither
+ nor keys
+ were defined for monitors. Under such conditions, monitors may not be
+ able to form quorum. Monitor status tells if the monitor is up and
+ running normally. The status is checked by running ceph daemon
+ mon.hostname mon_status on the remote end, which provides the output
+ and returns a Boolean status.
+ False means a monitor that is not healthy even if it is
+ up and running, while True means the monitor is up and
+ running correctly.
+
+
+ Usage:
+
+ceph-deploy mon add host
+ceph-deploy mon add host --address IP
+
+ host is the host name and
+ IP is the IP address of the desired monitor
+ node.
+
+
+
+
+ osd prepare
+
+
+ Prepares a directory, disk, or drive for a Ceph OSD. It first checks
+ whether multiple OSDs are being created and warns if more than the
+ recommended number are, which would cause issues with the maximum allowed PIDs
+ in a system. It then reads the bootstrap-osd key for the cluster, or
+ writes the bootstrap key if it is not found. It then uses the
+ ceph-disk utility’s prepare
+ subcommand to prepare the disk and journal and deploy the OSD on the
+ desired host. It gives the OSD some time to settle, checks for any
+ possible errors and, if found, reports them to the user.
+
+
+ Usage:
+
+ceph-deploy osd prepare host:disk[:journal] ...
+
+
+
+ osd activate
+
+
+ Activates the OSD prepared using the prepare
+ subcommand. It uses the ceph-disk utility’s
+ activate subcommand to activate the OSD with the
+ appropriate initialization type based on the distribution. When
+ activated, it gives the OSD some time to start, checks for any
+ possible errors and, if found, reports them to the user. It checks the status
+ of the prepared OSD, checks the OSD tree, and makes sure the OSDs are up
+ and in.
+
+
+
+ osd activate is usually not needed, as
+ udev rules automatically trigger "activate" after
+ a disk has been prepared with osd prepare.
+
+
+
+ Usage:
+
+ceph-deploy osd activate host:disk[:journal] ...
+
+
+ You can use ceph-deploy osd create to join
+ prepare and activate functionality
+ into one command.
+
+
+
+
+
+ rgw prepare/activate/create
+
+
+ Find more information in .
+
+
+
+
+ purge, purgedata, forgetkeys
+
+
+ You can use the subcommands to completely purge the Ceph cluster (or
+ some of its nodes) as if Ceph had never been installed on the cluster
+ servers. They are typically used when the Ceph installation fails and you
+ want to start with a clean environment. You can also purge one or more
+ nodes because you want to remove them from the cluster as their
+ life-cycle ends.
+
+
+ For more information on purging the cluster or its nodes, see
+ .
+
+
+
+ If you do not intend to purge the whole cluster, do not use the
+ forgetkeys subcommand, as the keys will remain in
+ place for the remaining cluster infrastructure.
+
+
+
+
+
+
+
+ Adding OSDs with ceph-disk
+
+
+ ceph-disk is a utility that can prepare and activate a
+ disk, partition, or directory as a Ceph OSD. It reduces the multiple
+ steps involved in manually creating and starting an OSD to two steps,
+ preparing and activating the OSD, by using the subcommands
+ prepare and activate.
+
+
+
+
+ prepare
+
+
+
+ Prepares a directory, disk, or drive for a Ceph OSD. It creates a GPT
+ partition, marks the partition with the Ceph type UUID, creates a file
+ system, marks the file system as ready for Ceph consumption, uses the
+ entire partition, and adds a new partition to the journal disk.
+
+
+
+
+ activate
+
+
+
+ Activates the Ceph OSD. It mounts the volume in a temporary location,
+ allocates an OSD ID (if needed), remounts in the correct location
+ /var/lib/ceph/osd/cluster-id
+ and starts ceph-osd.
+
+
+
+
+
+
+ The following example shows steps for adding an OSD with
+ ceph-disk.
+
+
+
+
+
+ Make sure a new disk is physically present on the node where you want to
+ add the OSD. In our example, it is node1 belonging to
+ cluster ceph.
+
+
+
+
+ ssh to node1.
+
+
+
+
+ Generate a unique identification for the new OSD:
+
+uuidgen
+c70c032a-6e88-4962-8376-4aa119cb52ee
+
+
+
+ Prepare the disk:
+
+sudo ceph-disk prepare --cluster ceph \
+--cluster-uuid c70c032a-6e88-4962-8376-4aa119cb52ee --fs-type xfs /dev/hdd1
+
+
+
+ Activate the OSD:
+
+sudo ceph-disk activate /dev/hdd1
+
+
+
+
+ Adding OSDs with ceph-deploy
+
+
+ ceph-deploy is a command line utility to simplify the
+ installation and configuration of a Ceph cluster. It can be used to add or
+ remove OSDs as well. To add a new OSD to a node node2
+ with ceph-deploy, follow these steps:
+
+
+
+
+ ceph-deploy is usually run from the administration node,
+ from which you installed the cluster.
+
+
+
+
+
+
+ List available disks on a node:
+
+ceph-deploy disk list node2
+[...]
+[node2][DEBUG ] /dev/sr0 other, unknown
+[node2][DEBUG ] /dev/vda :
+[node2][DEBUG ] /dev/vda1 swap, swap
+[node2][DEBUG ] /dev/vda2 other, btrfs, mounted on /
+[node2][DEBUG ] /dev/vdb :
+[node2][DEBUG ] /dev/vdb1 ceph data, active, cluster ceph, osd.1, journal /dev/vdb2
+[node2][DEBUG ] /dev/vdb2 ceph journal, for /dev/vdb1
+[node2][DEBUG ] /dev/vdc other, unknown
+
+ /dev/vdc seems to be unused, so let us focus on
+ adding it as an OSD.
+
+
+
+
+ Zap the disk. Zapping deletes the disk's partition table.
+
+ceph-deploy disk zap node2:vdc
+
+
+ Zapping deletes all data from the disk
+
+
+
+
+
+ Prepare the OSD. The prepare command expects you to
+ specify the disk for data, and optionally the disk for its journal. We
+ recommend storing the journal on a separate drive to maximize throughput.
+
+ceph-deploy osd prepare node2:vdc:/dev/ssd
+
+
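+
+ After the OSD has been prepared (and automatically activated by the udev
+ rules), you can verify that it joined the cluster:
+
+ceph osd tree
+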
+
+
+
+ Adding and Removing Monitors
+
+
+ With ceph-deploy, adding and removing monitors is a
+ simple task. Also, take into account the following
+ restrictions and recommendations.
+
+
+
+
+
+
+ ceph-deploy restricts you to installing only one monitor
+ per host.
+
+
+
+
+ We do not recommend mixing monitors and OSDs on the same host.
+
+
+
+
+ For high availability, you should run a production Ceph cluster with
+ at least three monitors.
+
+
+
+
+
+
+ Adding a Monitor
+
+ After you create a cluster and install Ceph packages on the monitor
+ host(s) (see for more
+ information), you may deploy the monitors to the monitor hosts. You may
+ specify several monitor host names in the same command.
+
+ceph-deploy mon create host-name
+
+
+ When adding a monitor on a host that was not among the hosts initially defined
+ with the ceph-deploy new command, a statement needs to be added to the
+ ceph.conf file; see the sketch below.
+
+
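+
+ What exactly must be added depends on your setup; typically it is a
+ public network entry in the [global] section of ceph.conf, so that
+ ceph-deploy can derive an address for the new monitor. A minimal sketch
+ with a placeholder subnet:
+
+[global]
+public network = 10.1.1.0/24
+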
+
+
+
+ Removing a Monitor
+
+ If you have a monitor in your cluster that you want to remove, you may use
+ the destroy option. You may specify several monitor host names in the same
+ command.
+
+ceph-deploy mon destroy host-name
+
+
+ Ensure that if you remove a monitor, the remaining monitors will be able
+ to establish a consensus. If that is not possible, consider adding a
+ monitor before removing the monitor you want to take offline.
+
+
+
+
+
+ Usage of ceph-deploy rgw
+
+
+ The ceph-deploy script includes the
+ rgw component that helps you manage RADOS Gateway instances. Its
+ general form follows this pattern:
+
+
+ceph-deploy rgw subcommand rgw-host:rgw-instance:fqdn:port:redirect
+
+
+
+ subcommand
+
+
+ One of list, prepare,
+ activate, create (=
+ prepare + activate), or
+ delete.
+
+
+
+
+ rgw-host
+
+
+ Host name where you want to operate the RADOS Gateway.
+
+
+
+
+ rgw-instance
+
+
+ Ceph instance name. Default is 'rgw-host'.
+
+
+
+
+ fqdn
+
+
+ Virtual host to listen to. Default is 'None'.
+
+
+
+
+ port
+
+
+ Port to listen to. Default is 80.
+
+
+
+
+ redirect
+
+
+ The URL redirect. Default is '^/(.*)'.
+
+
+
+
+
+
+ For example:
+
+
+ceph-deploy rgw prepare example_host2:gateway1
+
+
+ or
+
+
+ceph-deploy rgw activate example_host1:gateway1:virtual_srv2:81
+
+
+ Specifying Multiple RADOS Gateway Instances
+
+ You can specify several host:instance pairs on
+ the same command line if you separate them with a comma:
+
+ceph-deploy rgw create hostname1:rgw,hostname2:rgw,hostname3:rgw
+
+
+
+ For a practical example of setting RADOS Gateway with
+ ceph-deploy, see .
+
+
+
+ RADOS Gateway Client Usage
+
+
+ To use RADOS Gateway REST interfaces, you need to create a user for the S3
+ interface, then a subuser for the Swift interface. Find more information on
+ creating RADOS Gateway users in and
+ .
+
+
+
+ S3 Interface Access
+
+ To access the S3 interface, you need to write a Python script. The script
+ will connect to the RADOS Gateway, create a new bucket, and list all buckets. The
+ values for access_key and
+ secret_key are taken from the values
+ returned by the
+ radosgw-admin command in
+ .
+
+
+
+
+ Install the python-boto package:
+
+sudo zypper in python-boto
+
+
+
+ Create a new Python script called s3test.py with the
+ following content:
+
+import boto
+import boto.s3.connection
+access_key = '11BS02LGFB6AL6H1ADMW'
+secret_key = 'vzCEkuryfn060dfee4fgQPqFrncKEIkh3ZcdOANY'
+conn = boto.connect_s3(
+    aws_access_key_id = access_key,
+    aws_secret_access_key = secret_key,
+    host = '{hostname}',
+    is_secure = False,
+    calling_format = boto.s3.connection.OrdinaryCallingFormat(),
+)
+bucket = conn.create_bucket('my-new-bucket')
+for bucket in conn.get_all_buckets():
+    print "{name}\t{created}".format(
+        name = bucket.name,
+        created = bucket.creation_date,
+    )
+
+ Replace {hostname} with the host name of the host
+ where you configured RADOS Gateway service, for example
+ gateway_host.
+
+
+
+
+ Run the script:
+
+python s3test.py
+
+ The script outputs something like the following:
+
+my-new-bucket 2015-07-22T15:37:42.000Z
+
+
+
+
+
+ Swift Interface Access
+
+ To access the RADOS Gateway via the Swift interface, you need the swift
+ command line client. Its manual page man 1 swift tells
+ you more about its command line options.
+
+
+ To install swift, run the following:
+
+sudo zypper in python-swiftclient
+
+ The swift access uses the following syntax:
+
+swift -A http://IP_ADDRESS/auth/1.0 \
+-U example_user:swift -K 'swift_secret_key' list
+
+ Replace IP_ADDRESS with the IP address of the
+ gateway server, and swift_secret_key with its
+ value from the output of the radosgw-admin key create
+ command executed for the swift user in
+ .
+
+
+ For example:
+
+swift -A http://gateway.example.com/auth/1.0 -U example_user:swift \
+-K 'r5wWIxjOCeEO7DixD1FjTLmNYIViaC6JVhi3013h' list
+
+ The output is:
+
+my-new-bucket
+
+
+
+ Automated Installation via Salt
+
+
+ The installation can be automated by using the Salt reactor. For virtual
+ environments or consistent hardware environments, this configuration will
+ allow the creation of a Ceph cluster with the specified behavior.
+
+
+
+
+ Salt cannot perform dependency checks based on reactor events. Putting
+ your Salt master into a death spiral is a real risk.
+
+
+
+
+ The automated installation requires the following:
+
+
+
+
+
+ A properly created
+ /srv/pillar/ceph/proposals/policy.cfg.
+
+
+
+
+ Prepared custom configuration, placed in the
+ /srv/pillar/ceph/stack directory.
+
+
+
+
+ The example reactor file
+ /usr/share/doc/packages/deepsea/reactor.conf must be
+ copied to /etc/salt/master.d/reactor.conf.
+
+
+
+
+
+ The default reactor configuration will only run Stages 0 and 1. This allows
+ testing of the reactor without waiting for subsequent stages to complete.
+
+
+
+ When the first salt-minion starts, Stage 0 will begin. A lock prevents
+ multiple instances. When all minions complete Stage 0, Stage 1 will begin.
+
+
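+
+ For reference, the individual DeepSea stages can also be triggered manually
+ from the Salt master, for example:
+
+salt-run state.orch ceph.stage.0
+salt-run state.orch ceph.stage.1
+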
+
+ If the operation is performed properly, change the last line in
+ /etc/salt/master.d/reactor.conf:
+
+
+- /srv/salt/ceph/reactor/discovery.sls
+
+
+ to
+
+
+- /srv/salt/ceph/reactor/all_stages.sls
+
+
+
+ Restarting Ceph services using DeepSea
+
+ When you install updates, specifically of the Ceph packages (ceph-mon, ceph-osd, and so on), you need to restart the services to make use of the recently installed version. To do so, run:
+
+ salt-run state.orch ceph.restart
+
+ The script iterates over all roles you have configured in the following order: MON, OSD, MDS, RGW, IGW. To keep the downtime low and to find potential issues as early as possible, nodes are restarted sequentially. For example, only one monitor node is restarted at a time. The command also waits for the cluster to recover if it is in a degraded, unhealthy state.
+
+
+ Watching the Restarting
+
+ The process of restarting the cluster may take some time. You can watch the events by using the Salt event bus by running:
+
+ salt-run state.event pretty=True
+
+
+
+
+ Node Management
+
+
+ After you set up a complete cluster, you may need to perform additional
+ changes to the cluster, such as adding or removing monitor nodes or
+ adding/removing Ceph OSD nodes. Adding and removing cluster nodes
+ can be done without shutting down the whole cluster, but it might increase
+ replication traffic.
+
+
+
+ Limitations
+
+ The procedures described in sections:
+ and can be performed only with the
+ default CRUSH Map. The default CRUSH Map must have been created by using
+ ceph-deploy or DeepSea.
+
+
+
+
+ Adding Ceph OSD Nodes
+
+ The procedure below describes how to add a Ceph OSD node to your cluster.
+
+
+ Adding a Ceph OSD Node
+
+
+ List all Ceph OSD nodes and then choose a proper name for the new
+ node(s):
+
+ceph osd tree
+
+
+
+ Inspect your CRUSH Map to find out the bucket type; for a procedure, refer
+ to . Typically the bucket type is
+ host.
+
+
+
+
+ Create a record for the new node in your CRUSH map.
+
+ceph osd crush add-bucket {bucket name} {bucket type}
+
+ for example:
+
+ceph osd crush add-bucket ses4-4 host
+
+
+
+ Add all OSDs that the new node should use. For a procedure, refer to
+ .
+
+
+
+
+
+
+ Removing Ceph OSD Nodes
+
+ To remove a Ceph OSD node, follow this procedure:
+
+
+ Removing a Ceph OSD Node
+
+
+ Remove all OSDs on the node you want to delete as described in
+ .
+
+
+
+
+ Verify that all OSDs have been removed:
+
+ceph osd tree
+
+ The node to be removed must not have any OSDs left.
+
+
+
+
+ Remove the node from the cluster:
+
+ceph osd crush remove {bucket name}
+
+
+
+
+
+ Removing and Reinstalling Salt Cluster Nodes
+
+ You may want to remove a role from your minion. To do so, use the Stage 5
+ command:
+
+root # salt-run state.orch ceph.stage.5
+
+ When a role is removed from a minion, the objective is to undo all changes
+ related to that role. For most of the roles, the task is simple, but there
+ may be problems with package dependencies. If a package is uninstalled, its
+ dependencies are not.
+
+
+ Removed OSDs appear as blank drives. The related tasks overwrite the
+ beginning of the file systems and remove backup partitions in addition to
+ wiping the partition tables.
+
+
+ Preserving Partitions Created by Other Methods
+
+ Disk drives previously configured by other methods, such as
+ ceph-deploy, may still contain partitions. DeepSea
+ will not automatically destroy these. Currently, the administrator must
+ reclaim these drives.
+
+
+
+
+
+
+ Monitoring
+
+
+ Usage Graphs on Calamari
+
+
+ Calamari—Ceph's Web front-end for managing and monitoring the
+ cluster—includes several graphs on the cluster's usage.
+
+
+
+ At the bottom of the Dashboard—the home page of
+ Calamari—there are two usage-related boxes. While
+ IOPS shows the cluster's overall number of input/output
+ operations per second, the Usage graph shows the
+ cluster's total and used disk space.
+
+
+
+
+
+ You can find more detailed and interactive graphs after clicking the
+ Charts menu item. It shows the cluster's overall
+ input/output operations per second and free disk space by default. Select
+ Pool IOPS from the top drop-down box to break the view
+ down by existing pools.
+
+
+
+
+
+ By moving the slider in the Time Axis pane, you can
+ change the displayed time interval in the graph. When you move the mouse over
+ the graph, the time/read/write information changes accordingly. Clicking
+ and dragging the mouse horizontally across the graph zooms in on the selected time
+ interval. You can see more help by moving the mouse over the
+ little question mark in the top right corner of the graph.
+
+
+
+ If you select the host name of a specific Ceph server, Calamari displays
+ detailed information about CPU, average load, and memory related to the
+ specified host.
+
+
+
+
+
+ Checking for Full OSDs
+
+
+ Ceph prevents you from writing to a full OSD so that you do not lose data.
+ In an operational cluster, you should receive a warning when your cluster is
+ getting near its full ratio. The mon osd full ratio
+ defaults to 0.95, or 95% of capacity before it stops clients from writing
+ data. The mon osd nearfull ratio defaults to 0.85, or 85%
+ of capacity, when it generates a health warning.
+
+
+
+ Full OSD nodes will be reported by ceph health:
+
+
+ceph health
+ HEALTH_WARN 1 nearfull osds
+ osd.2 is near full at 85%
+
+
+ or
+
+
+ceph health
+ HEALTH_ERR 1 nearfull osds, 1 full osds
+ osd.2 is near full at 85%
+ osd.3 is full at 97%
+
+
+ The best way to deal with a full cluster is to add new OSD nodes allowing
+ the cluster to redistribute data to the newly available storage.
+
+
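+
+ To see how close the cluster and individual pools or OSDs are to their
+ limits, check the utilization statistics, for example:
+
+ceph df
+ceph osd df
+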
+
+ If you cannot start an OSD because it is full, you may delete some data by
+ deleting some placement group directories in the full OSD.
+
+
+
+ Preventing Full OSDs
+
+ After an OSD becomes full—that is, it uses 100% of its disk space—it
+ will normally crash quickly without warning. The following are a few tips to
+ remember when administering OSD nodes.
+
+
+
+
+ Each OSD's disk space (usually mounted under
+ /var/lib/ceph/osd/osd-{1,2..}) needs to be placed on
+ a dedicated underlying disk or partition.
+
+
+
+
+ Check the Ceph configuration files and make sure that Ceph does not
+ store its log file to the disks/partitions dedicated for use by OSDs.
+
+
+
+
+ Make sure that no other process writes to the disks/partitions dedicated
+ for use by OSDs.
+
+
+
+
+
+
+ Checking if OSD Daemons are Running on a Node
+
+
+ To check the status of OSD services on a specific node, log in to the node,
+ and run the following:
+
+
+sudo systemctl status ceph-osd*
+ ceph-osd@0.service - Ceph object storage daemon
+ Loaded: loaded (/usr/lib/systemd/system/ceph-osd@.service; enabled)
+ Active: active (running) since Fri 2015-02-20 11:13:18 CET; 2 days ago
+ Main PID: 1822 (ceph-osd)
+ CGroup: /system.slice/system-ceph\x2dosd.slice/ceph-osd@0.service
+ └─1822 /usr/bin/ceph-osd -f --cluster ceph --id 0
+
+
+ For more information, see .
+
+
+
+ Checking if Monitor Daemons are Running on a Node
+
+
+ To check the status of monitor services on a specific node, log in to the
+ node, and run the following:
+
+
+sudo systemctl status ceph-mon*
+ ceph-mon@doc-ceph1.service - Ceph cluster monitor daemon
+ Loaded: loaded (/usr/lib/systemd/system/ceph-mon@.service; enabled)
+ Active: active (running) since Wed 2015-02-18 16:57:17 CET; 4 days ago
+ Main PID: 1203 (ceph-mon)
+ CGroup: /system.slice/system-ceph\x2dmon.slice/ceph-mon@doc-ceph1.service
+ └─1203 /usr/bin/ceph-mon -f --cluster ceph --id doc-ceph1
+
+
+ For more information, see .
+
+
+
+ What Happens When a Disk Fails?
+
+
+ When a disk that stores cluster data has a hardware problem and fails to
+ operate, here is what happens:
+
+
+
+
+
+ The related OSD crashes and is automatically removed from the cluster.
+
+
+
+
+ The failed disk's data is replicated to another OSD in the cluster from
+ other copies of the same data stored in other OSDs.
+
+
+
+
+ Then you should remove the disk from the cluster CRUSH Map, and
+ physically from the host hardware.
+
+
+
+
+
+ What Happens When a Journal Disk Fails?
+
+
+ Ceph OSDs use journaling file systems (see
+ for
+ more information) to store data. When a disk dedicated to a journal fails,
+ the related OSD(s) fail as well (see
+ ).
+
+
+
+ Hosting Multiple Journals on One Disk
+
+ For a performance boost, you can use a fast disk (such as an SSD) to store
+ journal partitions for several OSDs. We do not recommend hosting journals
+ for more than four OSDs on one disk, because if the journal disk
+ fails, you risk losing the stored data of all the related OSDs.
+
+
+
+
+
+ Disk Management
+
+
+ Adding Disks
+
+
+
+
+
+ This can be done on a live cluster without downtime.
+
+
+
+
+ This will cause increased replication traffic between servers.
+
+
+
+
+ Doing this operation repeatedly, without waiting for the previous operation
+ to finish replicating, can save overall cluster rebuild time.
+
+
+
+
+
+
+ To add a disk (/dev/sdd in our example) to a Ceph
+ cluster, follow these steps:
+
+
+
+
+
+ Create a partition sdd1 on the disk:
+
+sudo parted /dev/sdd mkpart primary 0.0 -1s
+
+
+
+ Format the partition with XFS file system:
+
+sudo mkfs.xfs -f /dev/sdd1
+
+
+
+ Find out the UUID (Universally Unique Identifier) of the disk:
+
+ls -l /dev/disk/by-uuid | grep sdd1
+ [...] 04bb24f1-d631-47ff-a2ee-22d94ad4f80c -> ../../sdd1
+
+
+
+ Add the corresponding line to /etc/fstab for the
+ example disk osd.12:
+
+[...]
+ UUID=04bb24f1-d631-47ff-a2ee-22d94ad4f80c /mnt/osd.12 xfs \
+ defaults,errors=remount-ro 0 1
+ [...]
+
+
+
+ Mount the disk:
+
+sudo mount /mnt/osd.12
+
+
+
+ Add the new disk to /etc/ceph/ceph.conf and copy the
+ updated configuration file to all other nodes in the cluster.
+
+
+
+
+ Create the OSD:
+
+ceph osd create 04bb24f1-d631-47ff-a2ee-22d94ad4f80c
+
+
+
+ Initialize the OSD data directory and register its authentication key so that the new OSD is accepted into the cluster:
+
+sudo mkdir /srv/ceph/04bb24f1-d631-47ff-a2ee-22d94ad4f80c
+ ceph-osd -i 12 --mkfs --mkkey
+ ceph auth add osd.12 osd 'allow *' mon 'allow rwx' -i /etc/ceph/keyring.osd.12
+
+
+
+ Start the newly added OSD:
+
+sudo systemctl start ceph-osd@12.service
+
+
+
+ Add it to the cluster and allow replication based on CRUSH Map:
+
+ceph osd crush set 12 osd.12 1.0 \
+ pool=pool_name rack=rack_name host=host_name-osd
+
+
+
+ Check that the new OSD is in the right place within the cluster:
+
+ceph osd tree
+
+
+
+
+
+ The process of preparing/adding a disk can be simplified with the
+ ceph-disk command. See
+ for more
+ information on ceph-disk.
+
+
+
+
+ Deleting disks
+
+
+
+
+
+ This can be done on a live cluster without downtime.
+
+
+
+
+ This will cause increased replication traffic between servers.
+
+
+
+
+ Be sure not to remove too many disks from your cluster, so that it can
+ still fulfill the replication rules. See for more
+ information.
+
+
+
+
+
+
+ To delete a disk (for example osd.12) from a Ceph
+ cluster, follow these steps:
+
+
+
+
+
+ Make sure you have the right disk:
+
+ceph osd tree
+
+
+
+ If the disk is a member of a pool and/or active:
+
+
+
+
+ Drain the OSD by setting its weight to zero:
+
+ceph osd crush reweight osd.12 0
+
+ Then wait for all the placement groups to be moved away to other OSDs
+ with ceph -w. Optionally, you can check if the OSD is
+ emptying with df -h.
+
+
+
+
+ Mark the disk out:
+
+ceph osd out 12
+
+
+
+ Stop the related OSD service:
+
+sudo systemctl stop ceph-osd@12.service
+
+
+
+
+
+ Remove the disk from CRUSH Map:
+
+ceph osd crush remove osd.12
+
+
+
+ Remove authentication information for the disk:
+
+ceph auth del osd.12
+
+
+
+ Remove the disk from the cluster:
+
+ceph osd rm 12
+
+
+
+ Wipe the disk to remove all the data:
+
+sudo sgdisk --zap-all -- disk_device_name
+sudo sgdisk --clear --mbrtogpt -- disk_device_name
+
+
+
+
+ How to Use Existing Partitions for OSDs Including OSD Journals
+
+
+
+ This section describes an advanced topic that only storage experts and
+ developers should examine. It is mostly needed when using non-standard OSD
+ journal sizes. If the OSD partition's size is less than 10GB, its initial
+ weight is rounded to 0 and, because no data is placed on it, you
+ should increase its weight. We take no responsibility for overfilled
+ journals.
+
+
+
+
+ If you need to use existing disk partitions as an OSD node, the OSD journal
+ and data partitions need to be in a GPT partition table.
+
+
+
+ You need to set the correct partition types to the OSD partitions so that
+ udev recognizes them correctly and sets their
+ ownership to ceph:ceph.
+
+
+
+ For example, to set the partition type for the journal partition
+ /dev/vdb1 and data partition
+ /dev/vdb2, run the following:
+
+
+sudo sgdisk --typecode=1:45b0969e-9b03-4f30-b4c6-b4b80ceff106 /dev/vdb
+sudo sgdisk --typecode=2:4fbd7e29-9d25-41b8-afd0-062c0ceff05d /dev/vdb
+
+
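+
+ Afterward, you can check that the partitions carry the expected type codes,
+ for example:
+
+sudo sgdisk --print /dev/vdb
+sudo ceph-disk list
+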
+
+ The Ceph partition table types are listed in
+ /usr/lib/udev/rules.d/95-ceph-osd.rules:
+
+cat /usr/lib/udev/rules.d/95-ceph-osd.rules
+# OSD_UUID
+ACTION=="add", SUBSYSTEM=="block", \
+ ENV{DEVTYPE}=="partition", \
+ ENV{ID_PART_ENTRY_TYPE}=="4fbd7e29-9d25-41b8-afd0-062c0ceff05d", \
+ OWNER:="ceph", GROUP:="ceph", MODE:="660", \
+ RUN+="/usr/sbin/ceph-disk --log-stdout -v trigger /dev/$name"
+ACTION=="change", SUBSYSTEM=="block", \
+ ENV{ID_PART_ENTRY_TYPE}=="4fbd7e29-9d25-41b8-afd0-062c0ceff05d", \
+ OWNER="ceph", GROUP="ceph", MODE="660"
+
+# JOURNAL_UUID
+ACTION=="add", SUBSYSTEM=="block", \
+ ENV{DEVTYPE}=="partition", \
+ ENV{ID_PART_ENTRY_TYPE}=="45b0969e-9b03-4f30-b4c6-b4b80ceff106", \
+ OWNER:="ceph", GROUP:="ceph", MODE:="660", \
+ RUN+="/usr/sbin/ceph-disk --log-stdout -v trigger /dev/$name"
+ACTION=="change", SUBSYSTEM=="block", \
+ ENV{ID_PART_ENTRY_TYPE}=="45b0969e-9b03-4f30-b4c6-b4b80ceff106", \
+ OWNER="ceph", GROUP="ceph", MODE="660"
+[...]
+
+
+
+
+ Recovery
+
+
+ 'Too Many PGs per OSD' Status Message
+
+
+ If you receive a Too Many PGs per OSD message after
+ running ceph status, it means that the
+ mon pg warn max per osd value (300 by default) was
+ exceeded. This value is compared to the actual ratio of PGs per OSD. This
+ means that the cluster setup is not optimal.
+
+
+
+ As the number of PGs cannot be reduced after the pool is created, the only
+ solution is to add OSDs to the cluster so that the ratio of PGs per OSD
+ becomes lower.
+
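+
+ To review how many placement groups each pool currently uses when planning
+ such a change (the pool name is a placeholder):
+
+ceph osd lspools
+ceph osd pool get example-pool pg_num
+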
+
+
+ Calamari Has a Stale Cluster
+
+
+ The Calamari back-end supports operating multiple clusters, while its
+ front-end does not yet. This means that if you point Calamari at one
+ cluster, then destroy that cluster and create a new one, and then point the
+ same Calamari instance at the new cluster, it will still remember the old
+ cluster and will probably try to display the old cluster's state by
+ default.
+
+
+
+ To make Calamari 'forget' the old cluster, run:
+
+
+sudo systemctl stop cthulhu.service
+sudo calamari-ctl clear --yes-i-am-sure
+sudo calamari-ctl initialize
+
+
+ This will make Calamari forget all the old clusters it knows about. It will,
+ however, not clear out the salt minion keys from the master. This is fine if
+ you are reusing the same nodes for the new cluster.
+
+
+
+ 'nn pg stuck inactive' Status Message
+
+
+ If you receive a stuck inactive status message after
+ running ceph status, it means that Ceph does not know
+ where to replicate the stored data to fulfill the replication rules. It can
+ happen shortly after the initial Ceph setup and fix itself automatically.
+ In other cases, this may require a manual interaction, such as bringing up a
+ broken OSD, or adding a new OSD to the cluster. In very rare cases, reducing
+ the replication level may help.
+
+
+
+ If the placement groups are stuck perpetually, you need to check the output
+ of ceph osd tree. The output should look tree-structured,
+ similar to the example in .
+
+
+
+ If the output of ceph osd tree is rather flat as in the
+ following example
+
+
+ceph osd tree
+ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
+-1 0 root default
+ 0 0 osd.0 up 1.00000 1.00000
+ 1 0 osd.1 up 1.00000 1.00000
+ 2 0 osd.2 up 1.00000 1.00000
+
+
+ you should check that the related CRUSH map has a tree structure. If it is
+ also flat or has no hosts, as in the above example, it may mean that host
+ name resolution is not working correctly across the cluster.
+
+
+
+ OSD Weight is 0
+
+
+ When an OSD starts, it is assigned a weight. The higher the weight, the bigger
+ the chance that the cluster writes data to the OSD. The weight is either
+ specified in the cluster CRUSH Map, or calculated by the OSD's start-up
+ script.
+
+
+
+ In some cases, the calculated value for an OSD's weight may be rounded down to
+ zero. This means that the OSD is not scheduled to store data, and no data is
+ written to it. The reason is usually that the disk is too small (smaller
+ than 15GB) and should be replaced with a bigger one.
+
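+
+ If the disk is in fact large enough and you only need to correct the
+ weight, you can set it manually (the OSD ID and weight are placeholders):
+
+ceph osd crush reweight osd.2 1.0
+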
+
+
+ OSD is Down
+
+
+ An OSD daemon is either running or stopped/down. There are three general reasons
+ why an OSD is down:
+
+
+
+
+
+ Hard disk failure.
+
+
+
+
+ The OSD crashed.
+
+
+
+
+ The server crashed.
+
+
+
+
+
+ You can see the detailed status of OSDs by running
+
+
+ceph osd tree
+# id weight type name up/down reweight
+ -1 0.02998 root default
+ -2 0.009995 host doc-ceph1
+ 0 0.009995 osd.0 up 1
+ -3 0.009995 host doc-ceph2
+ 1 0.009995 osd.1 up 1
+ -4 0.009995 host doc-ceph3
+ 2 0.009995 osd.2 down 1
+
+
+ The example listing shows that osd.2 is down. You
+ may then check if the disk where the OSD is located is mounted:
+
+
+lsblk -f
+ [...]
+ vdb
+ ├─vdb1 /var/lib/ceph/osd/ceph-2
+ └─vdb2
+
+
+ You can track the reason why the OSD is down by inspecting its log file
+ /var/log/ceph/ceph-osd.2.log. After you find and fix
+ the reason why the OSD is not running, start it with
+
+
+sudo systemctl start ceph-osd@2.service
+
+
+ Do not forget to replace 2 with the actual number of your
+ stopped OSD.
+
+
+
+ Fixing Clock Skew Warnings
+
+
+ The time information in all cluster nodes must be synchronized. If a node's
+ time is not fully synchronized, you may get clock skew warnings when
+ checking the state of the cluster.
+
+
+
+ Time synchronization is managed with NTP (see
+ ).
+ Set each node to synchronize its time with one or more NTP servers,
+ preferably to the same group of NTP servers. If the time skew still occurs
+ on a node, follow these steps to fix it:
+
+
+systemctl stop ntpd.service
+systemctl stop ceph-mon.target
+systemctl start ntpd.service
+systemctl start ceph-mon.target
+
+
+ You can then query the NTP peers and check the time offset with
+ sudo ntpq -p.
+
+
+
+ The Ceph monitors need to have their clocks synchronized to within 0.05
+ seconds of each other. In a typical
+ ntpd configuration with remote NTP
+ servers, it may be impossible for
+ ntpd to reliably maintain this
+ degree of accuracy. In such cases, the Ceph developers recommend running
+ an NTP server in the local network.
+
+
+
+
+ Accountancy
+
+
+ Adding S3 Users
+
+
+ S3 (Simple Storage Service) is an online file storage Web service, offered
+ by Amazon. Besides the Swift interface, you can use the S3 interface to
+ interact with the Ceph RADOS Gateway. You need to create a user to interact with
+ the gateway.
+
+
+
+ To create a user for the S3 interface, run the following command:
+
+
+sudo radosgw-admin user create --uid=username \
+ --display-name="display-name" --email=email
+
+
+ For example:
+
+
+sudo radosgw-admin user create \
+ --uid=example_user \
+ --display-name="Example User" \
+ --email=penguin@example.com
+
+
+ The command also creates the user's access and secret key. Check its output
+ for access_key and secret_key keywords
+ and their values:
+
+
+[...]
+ "keys": [
+ { "user": "example_user",
+ "access_key": "11BS02LGFB6AL6H1ADMW",
+ "secret_key": "vzCEkuryfn060dfee4fgQPqFrncKEIkh3ZcdOANY"}],
+ [...]
+
+
+ Removing S3 Users
+
+
+ To remove a user previously created to interact with the S3 interface, use
+ the following command:
+
+
+sudo radosgw-admin user rm --uid=example_user
+
+
+ For more information on the command's options, see
+ .
+
+
+
+ User Quota Management
+
+
+ The Ceph RADOS Gateway enables you to set quotas on users and buckets owned by
+ users. Quotas include the maximum number of objects in a bucket and the
+ maximum storage size in megabytes.
+
+
+
+ Before you enable a user quota, you first need to set its parameters:
+
+
+radosgw-admin quota set --quota-scope=user --uid=example_user \
+ --max-objects=1024 --max-size=1024
+
+
+
+
+
+
+
+ Specifies the maximum number of objects. A negative value disables the
+ check.
+
+
+
+
+
+
+
+
+ Specifies the maximum number of bytes. A negative value disables the
+ check.
+
+
+
+
+
+
+
+
+ Sets the scope for the quota. The options are bucket
+ and user. Bucket quotas apply to buckets a user owns.
+ User quotas apply to a user.
+
+
+
+
+
+
+ Once you set a user quota, you may enable it:
+
+
+radosgw-admin quota enable --quota-scope=user --uid=example_user
+
+
+ To disable a quota:
+
+
+radosgw-admin quota disable --quota-scope=user --uid=example_user
+
+
+ To list quota settings:
+
+
+radosgw-admin user info --uid=example_user
+
+
+ To update quota statistics:
+
+
+radosgw-admin user stats --uid=example_user --sync-stats
+
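+ Bucket quotas follow the same pattern, using the bucket
+ scope described above. A sketch, reusing the example user:
+
+radosgw-admin quota set --quota-scope=bucket --uid=example_user \
+ --max-objects=1024 --max-size=1024
+radosgw-admin quota enable --quota-scope=bucket --uid=example_user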
+
+ Adding Swift Users
+
+
+ Swift is a standard for stored data access compatible with OpenStack. It is
+ used to interact with the Ceph RADOS Gateway. You need to create a Swift user,
+ access key and secret to enable end users to interact with the gateway.
+ There are two types of users: a user and
+ subuser. While users are used when
+ interacting with the S3 interface, subusers are users
+ of the Swift interface. Each subuser is associated with a user.
+
+
+
+
+
+ To create a Swift user—which is a subuser in
+ our terminology—you need to create the associated
+ user first.
+
+sudo radosgw-admin user create --uid=username \
+ --display-name="display-name" --email=email
+
+ For example:
+
+sudo radosgw-admin user create \
+ --uid=example_user \
+ --display-name="Example User" \
+ --email=penguin@example.com
+
+
+
+ To create a subuser (Swift interface) for the user, you must specify the
+ user ID (--uid=username), a subuser ID, and the
+ access level for the subuser.
+
+radosgw-admin subuser create --uid=uid \
+ --subuser=uid \
+ --access=[ read | write | readwrite | full ]
+
+ For example:
+
+radosgw-admin subuser create --uid=example_user \
+ --subuser=example_user:swift --access=full
+
+
+
+ Generate a secret key for the user.
+
+sudo radosgw-admin key create \
+ --gen-secret \
+ --subuser=example_user:swift \
+ --key-type=swift
+
+
+
+ Both commands will output JSON-formatted data showing the user state.
+ Notice the following lines, and remember the secret_key
+ value:
+
+"swift_keys": [
+ { "user": "example_user:swift",
+ "secret_key": "r5wWIxjOCeEO7DixD1FjTLmNYIViaC6JVhi3013h"}],
+
+
+
+
+ For more information on using Swift client, see
+ .
+
+
+
+ Removing Swift Users
+
+
+ When you remove a user, the user and the subuser are removed from the system.
+ However, you may remove only the subuser if you want. To remove a user (and
+ subuser), specify the user ID.
+
+
+radosgw-admin user rm --uid=example_user
+
+
+ To remove the subuser only, specify the
+ subuser ID.
+
+
+radosgw-admin subuser rm --uid=example_user:swift
+
+
+ You can make use of the following options:
+
+
+
+
+ --purge-data
+
+
+ Purges all data associated with the user ID.
+
+
+
+
+ --purge-keys
+
+
+ Purges all keys associated with the user ID.
+
+
+
+
+
+
+ Removing a Subuser
+
+ When you remove a subuser, you are removing access to the Swift interface.
+ The user will remain in the system. To remove the subuser, specify
+ the subuser ID.
+
+sudo radosgw-admin subuser rm --uid=example_user:swift
+
+ You can make use of the following option:
+
+
+
+ --purge-keys
+
+
+ Purges all keys associated with the user ID.
+
+
+
+
+
+
+
+ Changing S3 and Swift User Access and Secret Keys
+
+
+ The access_key and secret_key
+ parameters identify the RADOS Gateway user when accessing the gateway. Changing the
+ existing user keys is the same as creating new ones, as the old keys get
+ overwritten.
+
+
+
+ For S3 users, run the following:
+
+
+radosgw-admin key create --uid=example_user --key-type=s3 --gen-access-key --gen-secret
+
+
+ For Swift users, run the following:
+
+
+radosgw-admin key create --subuser=example_user:swift --key-type=swift --gen-secret
+
+
+
+
+
+
+
+ Specifies the type of key. Either swift or
+ s3.
+
+
+
+
+
+
+
+
+ Generates a random access key (for an S3 user by default).
+
+
+
+
+
+
+
+
+ Generates a random secret key.
+
+
+
+
+
+
+
+
+ Specifies a secret key, for example a manually generated one.
+
+
+
+
+
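+ For example, to assign a manually generated secret to the Swift subuser
+ instead of a random one (a sketch; the secret value is a placeholder):
+
+radosgw-admin key create --subuser=example_user:swift \
+ --key-type=swift --secret=your-manually-generated-secret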
+
+
+ Tune-ups
+
+
+ How Does the Number of Placement Groups Affect the Cluster Performance?
+
+
+ Placement groups (PGs) are internal data structures for storing data in a
+ pool across OSDs. The way Ceph stores data into PGs is defined in a
+ CRUSH Map, and you can override the default by editing it. When creating a
+ new pool, you need to specify the initial number of PGs for the pool.
+
+
+
+ When your cluster is becoming 70% to 80% full, it is time to add more OSDs
+ to it. When you increase the number of OSDs, you may consider increasing the
+ number of PGs as well.
+
+
+
+
+ Changing the number of PGs causes a lot of data transfer within the
+ cluster.
+
+
+
+
+ Calculating the optimal value for your newly resized cluster is a complex
+ task.
+
+
+
+ A high number of PGs creates small chunks of data. This speeds up recovery
+ after an OSD failure, but puts a lot of load on the monitor nodes as they
+ are responsible for calculating the data location.
+
+
+
+ On the other hand, a low number of PGs takes more time and data transfer to
+ recover from an OSD failure, but does not impose that much load on monitor
+ nodes as they need to calculate locations for fewer (but larger) data chunks.
+
+
+
+ Find more information on the optimal number of PGs for your cluster using
+ the online calculator.
+
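+ As a rough starting point only (a common rule of thumb, not a replacement
+ for the calculator), the total number of PGs is often sized at about
+ 100 PGs per OSD divided by the replica count, rounded to a power of two:
+
+# example for 64 OSDs and a replica count of 3 (placeholder values)
+echo $(( 64 * 100 / 3 ))
+# ~2133; round to a nearby power of two, for example 2048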
+
+
+ Can I Use SSDs and Hard Disks on the Same Cluster?
+
+
+ Solid-state drives (SSD) are generally faster than hard disks. If you mix
+ the two types of disks for the same write operation, the data writing to the
+ SSD disk will be slowed down by the hard disk performance. Thus, you should
+ never mix SSDs and hard disks for data writing
+ following the same rule (see
+ for more information on rules for storing
+ data).
+
+
+
+ There are generally two cases where using SSDs and hard disks in the same
+ cluster makes sense:
+
+
+
+
+
+ Use each disk type for writing data following different rules. Then you
+ need to have a separate rule for the SSD disk, and another rule for the
+ hard disk.
+
+
+
+
+ Use each disk type for a specific purpose, for example the SSD disk for
+ the journal, and the hard disk for storing data.
+
+
+
+
+
+ What are the Trade-offs of Using a Journal on SSD?
+
+
+ Using SSDs for OSD journal(s) is better for performance as the journal is
+ usually the bottleneck of hard disk-only OSDs. SSDs are often used to share
+ journals of several OSDs.
+
+
+
+ Following is a list of potential disadvantages of using SSDs for OSD
+ journal:
+
+
+
+
+
+ SSD disks are more expensive than hard disks. But as one OSD journal
+ requires only up to 6 GB of disk space, the price may not be so significant.
+
+
+
+
+ An SSD disk consumes a storage slot which could otherwise be used by a large
+ hard disk to extend the cluster capacity.
+
+
+
+
+ SSD disks have reduced write cycles compared to hard disks, but modern
+ technologies are beginning to eliminate the problem.
+
+
+
+
+ If you share several journals on the same SSD disk, you risk losing all the
+ related OSDs when the SSD disk fails. This will require a lot of data to
+ be moved to rebalance the cluster.
+
+
+
+
+ Hot-plugging disks becomes more complex, as the data mapping between the
+ failed OSD and the journal disk is not 1:1.
+
+
+
+
+
+
+ Integration
+
+ Storing KVM Disks in Ceph Cluster
+
+
+ You can create a disk image for a KVM-driven virtual machine, store it in a
+ Ceph pool, optionally convert the content of an existing image to it, and
+ then run the virtual machine with qemu-kvm making use of
+ the disk image stored in the cluster. For more detailed information, see
+ .
+
+
+
+ Storing libvirt Disks in Ceph Cluster
+
+
+ Similar to KVM (see ), you can
+ use Ceph to store virtual machines driven by libvirt. The advantage is
+ that you can run any libvirt-supported virtualization solution, such as
+ KVM, Xen, or LXC. For more information, see
+ .
+
+
+
+ Storing Xen Disks in Ceph Cluster
+
+
+ One way to use Ceph for storing Xen disks is to make use of libvirt as
+ described in .
+
+
+
+ Another option is to make Xen talk to the rbd
+ block device driver directly:
+
+
+
+
+
+ If you have no disk image prepared for Xen, create a new one:
+
+rbd create myimage --size 8000 --pool mypool
+
+
+
+ List images in the pool mypool and check if your new
+ image is there:
+
+rbd list mypool
+
+
+
+ Create a new block device by mapping the myimage image
+ to the rbd kernel module:
+
+sudo rbd map --pool mypool myimage
+
+ User Name and Authentication
+
+ To specify a user name, use the --id option. Moreover, if you use
+ cephx authentication, you must also specify a
+ secret. It may come from a keyring or a file containing the secret:
+
+sudo rbd map --pool rbd myimage --id admin --keyring
+ /path/to/keyring
+
+ or
+
+sudo rbd map --pool rbd myimage --id admin --keyfile /path/to/file
+
+
+
+
+ List all mapped devices:
+
+rbd showmapped
+ id pool image snap device
+ 0 mypool myimage - /dev/rbd0
+
+
+
+ Now you can configure Xen to use this device as a disk for running a
+ virtual machine. You can for example add the following line to the
+ xl-style domain configuration file:
+
+disk = [ '/dev/rbd0,,sda', '/dev/cdrom,,sdc,cdrom' ]
+
+
+
+
+ Mounting and Unmounting an RBD Image
+
+
+ Images stored inside a Ceph cluster pool can be mapped to a block device.
+ You can then format such device, mount it to be able to exchange files, and
+ unmount it when done.
+
+
+
+
+
+ Make sure your Ceph cluster includes a pool with the disk image you want
+ to mount. Assume the pool is called mypool and the
+ image is myimage.
+
+rbd list mypool
+
+
+
+ Map the image to a new block device.
+
+sudo rbd map --pool mypool myimage
+
+ User Name and Authentication
+
+ To specify a user name, use the --id option. Moreover, if you use
+ cephx authentication, you must also specify a
+ secret. It may come from a keyring or a file containing the secret:
+
+sudo rbd map --pool rbd myimage --id admin --keyring
+ /path/to/keyring
+
+ or
+
+sudo rbd map --pool rbd myimage --id admin --keyfile /path/to/file
+
+
+
+
+ List all mapped devices:
+
+rbd showmapped
+ id pool image snap device
+ 0 mypool myimage - /dev/rbd0
+
+ The device we want to work on is /dev/rbd0.
+
+
+
+
+ Make an XFS file system on the /dev/rbd0 device.
+
+sudo mkfs.xfs /dev/rbd0
+ log stripe unit (4194304 bytes) is too large (maximum is 256KiB)
+ log stripe unit adjusted to 32KiB
+ meta-data=/dev/rbd0 isize=256 agcount=9, agsize=261120 blks
+ = sectsz=512 attr=2, projid32bit=1
+ = crc=0 finobt=0
+ data = bsize=4096 blocks=2097152, imaxpct=25
+ = sunit=1024 swidth=1024 blks
+ naming =version 2 bsize=4096 ascii-ci=0 ftype=0
+ log =internal log bsize=4096 blocks=2560, version=2
+ = sectsz=512 sunit=8 blks, lazy-count=1
+ realtime =none extsz=4096 blocks=0, rtextents=0
+
+
+
+ Mount the device and check it is correctly mounted. Replace
+ /mnt with your mount point.
+
+sudo mount /dev/rbd0 /mnt
+ mount | grep rbd0
+ /dev/rbd0 on /mnt type xfs (rw,relatime,attr2,inode64,sunit=8192,...
+
+ Now you can move data from/to the device as if it was a local directory.
+
+
+ Increasing the Size of RBD Device
+
+ If you find that the size of the RBD device is no longer enough, you can
+ easily increase it.
+
+
+
+
+ Increase the size of the RBD image, for example up to 10GB.
+
+rbd resize --size 10000 mypool/myimage
+ Resizing image: 100% complete...done.
+
+
+
+ Grow the file system to fill up the new size of the device.
+
+sudo xfs_growfs /mnt
+ [...]
+ data blocks changed from 2097152 to 2560000
+
+
+
+
+
+
+ After you finish accessing the device, you can unmount it.
+
+sudo umount /mnt
+
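+ If you also want to release the block device itself, you can unmap it
+ afterward (a sketch; /dev/rbd0 is the device mapped earlier):
+
+sudo rbd unmap /dev/rbd0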
+
+
+
+
+ Cluster Maintenance and Troubleshooting
+
+
+
+ Creating and Deleting Pools from Calamari
+
+
+ Apart from using the command line to create or delete pools (see
+ and
+ ), you can do the same
+ from within Calamari in a more comfortable user interface.
+
+
+
+ To create a new pool using Calamari, follow these steps:
+
+
+
+
+
+ Log in to a running instance of Calamari.
+
+
+
+
+ Go to
+ ManagePools.
+ You can see a list of the cluster's existing pools.
+
+
+
+
+ Click
+
+
+
+ in the top right corner.
+
+
+
+
+ Enter a name for the new pool, and either change the number of replicas,
+ number of placement groups, and the CRUSH ruleset, or leave them at
+ default values.
+
+
+
+
+ Click
+
+
+
+ to confirm, then Cancel the
+ warning dialog.
+
+
+
+
+ Now you can see the new pool in the list of all existing pools. You can
+ verify the existence of the new pool on the command line with
+
+ceph osd lspools
+
+
+
+
+ To delete an existing pool using Calamari, follow these steps:
+
+
+
+
+
+ Log in to a running instance of Calamari.
+
+
+
+
+ Go to
+ ManagePools.
+ You can see a list of the cluster's existing pools.
+
+
+
+
+ From the list of pools, choose the one to delete and click the related
+
+
+
+
+
+
+
+
+
+ Confirm the deletion and Cancel the warning dialog.
+
+
+
+
+ You can verify the deletion of the pool on the command line with
+
+ceph osd lspools
+
+
+
+
+ Managing Keyring Files
+
+
+ When Ceph runs with authentication and authorization enabled (enabled by
+ default), you must specify a user name and a keyring containing the secret
+ key of the specified user. If you do not specify a user name, Ceph will use
+ client.admin as the default user name. If you do not
+ specify a keyring, Ceph will look for a keyring via the
+ keyring setting in the Ceph configuration. For example,
+ if you execute the ceph health command without specifying
+ a user or keyring:
+
+
+ceph health
+
+
+ Ceph interprets the command like this:
+
+
+ceph -n client.admin --keyring=/etc/ceph/ceph.client.admin.keyring health
+
+
+ ceph-authtool is a utility to create, view, and modify a
+ Ceph keyring file. A keyring file stores one or more Ceph authentication
+ keys and possibly an associated capability specification. Each key is
+ associated with an entity name, of the form {client,mon,mds,osd}.name.
+
+
+
+ To create a new keyring file in the current directory
+ containing a key for client.example1:
+
+
+ceph-authtool -C -n client.example1 --gen-key keyring
+
+
+ To add a new key for client.example2, omit the
+ -C option:
+
+
+ceph-authtool -n client.example2 --gen-key keyring
+
+
+ The keyring now has two entries:
+
+
+ceph-authtool -l keyring
+ [client.example1]
+ key = AQCQ04NV8NE3JBAAHurrwc2BTVkMGybL1DYtng==
+ [client.example2]
+ key = AQBv2INVWMqFIBAAf/4/H3zxzAsPBTH4jsN80w==
+
+
+ For more information on ceph-authtool, see its manual
+ page man 8 ceph-authtool.
+
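+ A keyring entry can also carry capabilities. For example, to generate the
+ key for client.example1 together with a set of capabilities (a sketch;
+ the capabilities shown are placeholders):
+
+ceph-authtool -C -n client.example1 --gen-key \
+ --cap mon 'allow r' --cap osd 'allow rw' keyring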
+
+
+ Creating Client Keys
+
+
+ User management functionality provides Ceph cluster administrators with
+ the ability to create, update and delete users directly in the cluster
+ environment.
+
+
+
+
+ When you create or delete users in the Ceph cluster, you may need to
+ distribute keys to clients so that they can be added to keyrings.
+
+
+
+
+ Adding a user creates a user name (TYPE.ID), a secret key and possibly
+ capabilities included in the command you use to create the user. A user’s
+ key enables the user to authenticate with the cluster. The user’s
+ capabilities authorize the user to read, write, or execute on monitors,
+ OSDs, or metadata servers.
+
+
+
+ Authentication key creation usually follows cluster user creation. There are
+ several ways to add a user. The most convenient seems to be using
+
+
+ceph auth get-or-create
+
+
+ It returns a keyfile format with the user name [in brackets] and the key. If
+ the user already exists, this command simply returns the user name and key
+ in the keyfile format. You may use the -o option to save the output to a
+ file.
+
+
+ceph auth get-or-create client.example1
+ [client.example1]
+ key = AQDs+odVODCGGxAAvmSnsNx3XYHJ7Ri6sZFfhw==
+
+
+ You can verify that the client key was added to the cluster keyring:
+
+
+ceph auth list
+ [...]
+ client.example1
+ key: AQDs+odVODCGGxAAvmSnsNx3XYHJ7Ri6sZFfhw==
+
+
+ When creating client users, you may create a user with no capabilities. A
+ user with no capabilities is useless beyond mere authentication, because the
+ client cannot retrieve the cluster map from the monitor. However, you can
+ create a user with no capabilities if you want to defer adding capabilities
+ later using the ceph auth caps command.
+
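+ A sketch of adding capabilities to such a user later (the pool name is a
+ placeholder):
+
+ceph auth caps client.example1 mon 'allow r' osd 'allow rw pool=mypool'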
+
+
+
+ After you add a key to the cluster keyring, go to the relevant client(s)
+ and copy the keyring from the cluster host to the client(s).
+
+
+
+
+ Find more details in the related upstream documentation, see
+ User
+ Management.
+
+
+
+ Revoking Client Keys
+
+
+ If you need to remove an already generated client key from the keyring file,
+ use the ceph auth del command. To remove the key for user
+ client.example1 that we added in
+ :
+
+
+ceph auth del client.example1
+
+
+ and check the deletion with ceph auth list.
+
+
+
+
+ After you remove a key from the cluster keyring, go to the relevant client(s)
+ and copy the updated keyring from the cluster host to the client(s).
+
+
+
+
+ Checking for Unbalanced Data Writing
+
+
+ When data is written to OSDs evenly, the cluster is considered balanced.
+ Each OSD within a cluster is assigned its weight. The
+ weight is a relative number and tells Ceph how much of the data should be
+ written to the related OSD. The higher the weight, the more data will be
+ written. If an OSD has zero weight, no data will be written to it. If the
+ weight of an OSD is relatively high compared to other OSDs, a large portion
+ of the data will be written there, which makes the cluster unbalanced.
+
+
+
+ Unbalanced clusters have poor performance, and in the case that an OSD with
+ a high weight suddenly crashes, a lot of data needs to be moved to other
+ OSDs, which slows down the cluster as well.
+
+
+
+ To avoid this, you should regularly check OSDs for the amount of data
+ writing. If the amount is between 30% and 50% of the capacity of a group of
+ OSDs specified by a given rule set, you need to reweight the OSDs. Check for
+ individual disks and find out which of them fill up faster than the others
+ (or are generally slower), and lower their weight. The same is valid for
+ OSDs where not enough data is written—you can increase their weight to
+ have Ceph write more data to them. In the following example, you will find
+ out the weight of an OSD with ID 13, and reweight it from 3 to 3.05:
+
+
+$ ceph osd tree | grep osd.13
+ 13 3 osd.13 up 1
+
+ $ ceph osd crush reweight osd.13 3.05
+ reweighted item id 13 name 'osd.13' to 3.05 in crush map
+
+ $ ceph osd tree | grep osd.13
+ 13 3.05 osd.13 up 1
+
+
+
+
+ OSD Reweight by Utilization
+
+ The ceph osd reweight-by-utilization
+ threshold command automates the process of
+ reducing the weight of OSDs which are heavily overused. By default it
+ adjusts the weights downward on OSDs which reached 120% of the average
+ usage, but if you include the optional threshold argument it uses that
+ percentage instead.
+
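+ For example, to reweight OSDs that exceed 110% of the average utilization
+ instead of the default 120% (a sketch):
+
+ceph osd reweight-by-utilization 110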
+
+
+
+ Time Synchronization of Nodes
+
+ Ceph requires precise time synchronization between all nodes. You should set up a node with your own NTP server. Even though you can point all ntpd instances to a remote public time server, we do not recommend it with Ceph. With such a configuration, each node in the cluster has its own NTP daemon that communicates continually over the Internet with a set of three or four time servers, all of which are several hops away. This solution introduces a large degree of latency variability that makes it difficult or impossible to keep the clock drift under 0.05 seconds (which is what the Ceph monitors require).
+
+
+ Thus, use a single machine as the NTP server for the whole cluster. Your NTP server's ntpd instance may then point to a remote (public) NTP server, or it can have its own time source. The ntpd instances on all nodes are then pointed to this local server. Such a solution has several advantages, such as eliminating unnecessary network traffic and clock skew, and decreasing the load on the public NTP servers. For details on how to set up the NTP server, refer to the SUSE Linux Enterprise Server Administration Guide.
+
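+ A minimal sketch of pointing a node's ntpd at such a local server
+ (ntp.example.com is a placeholder for your internal NTP server):
+
+echo "server ntp.example.com iburst" | sudo tee -a /etc/ntp.conf
+sudo systemctl restart ntpd.service
+sudo ntpq -p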
+
+ Then to change the time on your cluster, do the following:
+
+
+ Setting Time
+
+ You may face a situation when you need to set the time back, for example when the time changes from summer to standard time. We do not recommend moving the time backward for a longer period than the cluster is down. Moving the time forward does not cause any trouble.
+
+
+
+ Time Synchronization on the Cluster
+
+
+ Stop all clients accessing the Ceph cluster, especially those using iSCSI.
+
+
+
+
+ Shut down your Ceph cluster. On each node run:
+
+ rcceph stop
+
+
+ If you use Ceph together with SUSE OpenStack Cloud, also stop SUSE OpenStack Cloud.
+
+
+
+
+
+ Verify that your NTP server is set up correctly—all ntpd daemons get their time from a source or sources in the local network.
+
+
+
+
+ Set the correct time on your NTP server.
+
+
+
+
+ Verify that NTP is running and working properly, on all nodes run:
+
+ systemctl status ntpd.service
+ or
+ ntpq -p
+
+
+
+ Start all monitoring nodes and verify that there is no clock skew:
+
+ systemctl start ceph-mon.target
+
+
+
+ Start all OSD nodes.
+
+
+
+
+ Start other Ceph services.
+
+
+
+
+ Start the SUSE OpenStack Cloud if you have it.
+
+
+
+
+
+
+
+
+ Upgrading Software
+
+
+ Both SUSE Linux Enterprise Server and SUSE Enterprise Storage products are provided with regular package updates.
+ To apply new updates to the whole cluster, you need to run
+
+
+sudo zypper dup
+
+
+ on all cluster nodes. Remember to upgrade all the monitor nodes first, and
+ then all the OSD nodes one by one.
+
+
+
+ Increasing the Number of Placement Groups
+
+
+ When creating a new pool, you specify the number of placement groups for the
+ pool (see ). After adding more
+ OSDs to the cluster, you usually need to increase the number of placement
+ groups as well for performance and data durability reasons. For each
+ placement group, OSD and monitor nodes need memory, network and CPU at all
+ times, and even more during recovery. It follows that minimizing the
+ number of placement groups saves significant amounts of resources.
+
+
+
+ Too High a Value of pg_num
+
+ When changing the pg_num value for a pool, it may happen
+ that the new number of placement groups exceeds the allowed limit. For
+ example
+
+ceph osd pool set rbd pg_num 4096
+ Error E2BIG: specified pg_num 3500 is too large (creating 4096 new PGs \
+ on ~64 OSDs exceeds per-OSD max of 32)
+
+ The limit prevents extreme placement group splitting, and is derived from
+ the mon_osd_max_split_count value.
+
+
+
+
+ Determining the right new number of placement groups for a resized cluster
+ is a complex task. One approach is to continuously grow the number of
+ placement groups up to the state when the cluster performance is optimal. To
+ determine the new incremented number of placement groups, you need to get
+ the value of the mon_osd_max_split_count parameter, and add
+ it to the current number of placement groups. To give you a basic idea, take
+ a look at the following script:
+
+
+max_inc=`ceph daemon mon.a config get mon_osd_max_split_count 2>&1 \
+ | tr -d '\n ' | sed 's/.*"\([[:digit:]]\+\)".*/\1/'`
+ pg_num=`ceph osd pool get rbd pg_num | cut -f2 -d: | tr -d ' '`
+ echo "current pg_num value: $pg_num, max increment: $max_inc"
+ next_pg_num="$(($pg_num+$max_inc))"
+ echo "allowed increment of pg_num: $next_pg_num"
+
+
+ After finding out the next number of placement groups, increase it with
+
+
+ceph osd pool set pool_name pg_num next_pg_num
+
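+ Ceph only starts redistributing data into the new placement groups once
+ pgp_num is raised to the same value; a sketch using the same placeholders:
+
+ceph osd pool set pool_name pgp_num next_pg_num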
+
+ Adding a Pool
+
+
+ After you first deploy a cluster, Ceph uses the default pools to store
+ data. You can later create a new pool with
+
+
+ceph osd pool create
+
+
+ For more information on cluster pool creation, see
+ .
+
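+ For example, to create a pool named mypool with 512 placement groups
+ (a sketch; the name and the number of placement groups are placeholders):
+
+ceph osd pool create mypool 512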
+
+
+ Deleting a Pool
+
+
+ By deleting a pool, you permanently destroy all data stored in that pool.
+ You can delete a previously created pool with
+
+
+ceph osd pool delete
+
+
+ For more information on cluster pool deletion, see
+ .
+
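+ For example (a sketch; deleting a pool irreversibly destroys its data, and
+ the pool name must be given twice together with a confirmation flag):
+
+ceph osd pool delete mypool mypool --yes-i-really-really-mean-it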
+
+
+ Troubleshooting
+
+ This section describes several issues that you may face when you operate a Ceph cluster.
+
+
+
+ Sending Large Objects with rados Fails with Full OSD
+
+
+ rados is a command line utility to manage RADOS object
+ storage. For more information, see man 8 rados.
+
+
+
+ If you send a large object to a Ceph cluster with the
+ rados utility, such as
+
+
+rados -p mypool put myobject /file/to/send
+
+
+ it can fill up all the related OSD space and cause serious trouble to the
+ cluster performance. RADOS has a 'striper' API that enables applications to
+ stripe large objects over multiple OSDs. If you turn the striping feature on
+ with the --striper option, you can prevent the OSD from
+ filling up.
+
+
+rados --striper -p mypool put myobject /file/to/send
+
+
+
+ Corrupted XFS Filesystem
+
+ In rare circumstances, such as a kernel bug or broken/misconfigured hardware, the underlying file system (XFS) in which an OSD stores its data might be damaged and unmountable.
+
+
+ If you are sure there is no problem with your hardware and the system is configured properly, raise a bug against the XFS subsystem of the SUSE Linux Enterprise Server kernel and mark the particular OSD as down:
+
+ ceph osd down OSD identification
+
+ Do Not Format or Otherwise Modify the Damaged Device
+
+ Even though using xfs_repair to fix the problem in the file system may seem reasonable, do not use it, as the command modifies the file system. The OSD may start, but its functioning may be affected.
+
+
+
+ Now zap the underlying disk and recreate the OSD by running:
+
+ ceph-disk prepare --zap $OSD_DISK_DEVICE $OSD_JOURNAL_DEVICE
+ for example:
+ ceph-disk prepare --zap /dev/sdb /dev/sdd2
+
+
+
+
+ Performance Diagnosis
+
+
+ Finding Slow OSDs
+
+
+ When tuning the cluster performance, it is very important to identify slow
+ storage/OSDs within the cluster. The reason is that if the data is written
+ to the slow(est) disk, the complete write operation slows down as it always
+ waits until it is finished on all the related disks.
+
+
+
+ It is not trivial to locate the storage bottleneck. You need to examine each
+ and every OSD to find out the ones slowing down the write process. To do a
+ benchmark on a single OSD, run:
+
+
+ceph tell osd_id bench
+
+
+ For example:
+
+
+cephadm > ceph tell osd.0 bench
+ { "bytes_written": 1073741824,
+ "blocksize": 4194304,
+ "bytes_per_sec": "19377779.000000"}
+
+
+ Then you need to run this command on each OSD and compare the
+ bytes_per_sec value to get the slow(est) OSDs.
+
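+ A sketch of a small shell loop that benchmarks every OSD in turn
+ (ceph osd ls prints the IDs of all OSDs):
+
+for id in $(ceph osd ls); do
+ echo "osd.$id:"
+ ceph tell osd.$id bench
+done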
+
+
+ Is My Network Causing Issues?
+
+
+ There are more reasons why the cluster performance may become weak. One of
+ them can be network problems. In such a case, you may notice the cluster
+ failing to reach quorum, OSD and monitor nodes going offline, data transfers taking
+ a long time, or a lot of reconnect attempts.
+
+
+
+ To check whether cluster performance is degraded by network problems,
+ inspect the Ceph log files under the /var/log/ceph
+ directory.
+
+
+
+ To fix network issues on the cluster, focus on the following points:
+
+
+
+
+
+ Basic network diagnostics. Try to ping between cluster nodes and pay
+ attention to data loss and response times.
+
+
+
+
+ Network performance benchmark. Use tools such as Netperf to measure the
+ performance of your network.
+
+
+
+
+ Check firewall settings on cluster nodes. Make sure they do not block
+ ports/protocols required by Ceph operation. See
+ for more information on firewall
+ settings.
+
+
+
+
+ Check the networking hardware, such as network cards, cables, or switches,
+ for proper operation.
+
+
+
+
+
+ Separate Network
+
+ To ensure fast and safe network communication between cluster nodes, set up
+ a separate network used exclusively by the cluster OSD and monitor nodes.
+
+
+
+
+
+ Server Maintenance
+
+
+ Adding a Server to a Cluster
+
+
+
+ When adding an OSD to an existing cluster, be aware that the cluster will
+ be rebalancing for some time afterward. To minimize the rebalancing
+ periods, it is best to add all the OSDs you intend to add at the same time.
+
+
+
+
+ If you are adding an OSD to a cluster, follow
+ .
+
+
+
+ If you are adding a monitor to a cluster, follow
+ .
+
+
+
+
+ After adding a monitor, make sure that
+ /etc/ceph/ceph.conf files on each server point to the
+ new monitor as well so that it works after the next reboot.
+
+
+
+
+
+ Adding an OSD and a monitor on the same server is recommended only for small
+ clusters. Although the monitor can share a disk with the operating
+ system (preferably an SSD disk for performance reasons), it should
+ never share a disk with an OSD.
+
+
+
+
+ Removing a Server from a Cluster
+
+
+ When removing an OSD from an existing cluster, make sure there are enough
+ OSDs left in the cluster so that the replication rules can be followed. Also
+ be aware that the cluster will be rebalancing for some time after removing
+ the OSD.
+
+
+
+ If you are removing an OSD from a cluster, follow
+ .
+
+
+
+ If you are removing a monitor from a cluster, follow
+ .
+
+
+
+ Increasing File Descriptors
+
+
+ For OSD daemons, the read/write operations are critical to keep the Ceph
+ cluster balanced. They often need to have many files open for reading and
+ writing at the same time. On the OS level, the maximum number of
+ simultaneously open files is called 'maximum number of file descriptors'.
+
+
+
+ To prevent OSDs from running out of file descriptors, you can override the
+ OS default value and specify the number in
+ /etc/ceph/ceph.conf, for example:
+
+
+max_open_files = 131072
+
+
+ After you change max_open_files, you need to restart the
+ OSD service on the relevant Ceph node.
+
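+ A sketch of restarting all OSD daemons on a node after the change
+ (assuming a systemd-based setup):
+
+sudo systemctl restart ceph-osd.target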
+
+
+
+ Networking
+
+
+ Setting NTP to a Ceph Cluster
+
+
+ In a cluster environment, it is necessary to keep all cluster nodes' time
+ synchronized. NTP—Network Time Protocol—is a network service
+ commonly used for this purpose. NTP is well integrated in SUSE products,
+ including SUSE Enterprise Storage. There are two ways to configure NTP—either using
+ YaST, or setting it up manually. Find both methods described—and
+ more information on NTP in general—in
+ SUSE Linux Enterprise Server
+ Administration Guide.
+
+
+
+ Firewall Settings for Ceph
+
+
+ We recommend protecting the network cluster communication with SUSE
+ Firewall. You can edit its configuration by selecting
+ YaSTSecurity and
+ UsersFirewallAllowed
+ Services.
+
+
+
+ For Calamari, enable the "HTTP Server", "Carbon" and "SaltStack" services
+ (ports 80, 2003, 2004, 4505 and 4506).
+
+
+
+ For Ceph monitor nodes, enable the "Ceph MON" service (port 6789).
+
+
+
+ For Ceph OSD (or MDS) nodes, enable the "Ceph OSD/MDS" service (ports
+ 6800-7300).
+
+
+
+ Adding a Private Network to a Running Cluster
+
+
+ If you do not specify a cluster network during Ceph deployment, it assumes
+ a single public network environment. While Ceph operates fine with a
+ public network, its performance and security improves when you set a second
+ private cluster network.
+
+
+
+ A general recommendation for a Ceph cluster is to have two networks: a
+ public (front-side) and cluster (back-side) one. To support two networks,
+ each Ceph node needs to have at least two network cards.
+
+
+
+ You need to apply the following changes to each Ceph node. This is
+ manageable for a small cluster, but can be very time-consuming if you have
+ a cluster consisting of hundreds or thousands of nodes.
+
+
+
+
+
+ Stop Ceph related services on each cluster node.
+
+
+ Add the cluster network setting to the Ceph configuration, replacing
+ 10.0.0.0/24 with the IP address and netmask of
+ the cluster network (see the sketch after this procedure). You can specify
+ several comma-delimited subnets. If you need to specifically assign static
+ IP addresses or override the cluster network settings, you can do so with
+ an optional per-daemon setting.
+
+
+
+
+ Check that the private cluster network works as expected on the OS level.
+
+
+
+
+ Start Ceph related services on each cluster node.
+
+sudo rcceph start
+
+
+
+
+
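+ The cluster network itself is defined in the Ceph configuration. The
+ following is only a sketch, under the assumption that the setting lives in
+ the [global] section of /etc/ceph/ceph.conf; adjust the subnet to your
+ environment:
+
+# append to /etc/ceph/ceph.conf on each node (sketch)
+echo "cluster network = 10.0.0.0/24" | sudo tee -a /etc/ceph/ceph.conf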
+
+
+ Glossary
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES2.1
+
+
+
+ General
+ Admin node
+
+
+ The node from which you run the ceph-deploy utility to
+ deploy Ceph on OSD nodes.
+
+
+
+ Bucket
+
+
+ A point which aggregates other nodes into a hierarchy of physical
+ locations.
+
+
+
+ CRUSH, CRUSH Map
+
+
+ An algorithm that determines how to store and retrieve data by computing
+ data storage locations. CRUSH requires a map of the cluster to
+ pseudo-randomly store and retrieve data in OSDs with a uniform
+ distribution of data across the cluster.
+
+
+
+ Monitor node, MON
+
+
+ A cluster node that maintains maps of cluster state, including the monitor
+ map, or the OSD map.
+
+
+
+ OSD node
+
+
+ A cluster node that stores data, handles data replication, recovery,
+ backfilling, rebalancing, and provides some monitoring information to
+ Ceph monitors by checking other Ceph OSD daemons.
+
+
+
+ Node
+
+
+ Any single machine or server in a Ceph cluster.
+
+
+
+ Pool
+
+
+ Logical partitions for storing objects such as disk images.
+
+
+
+ Rule Set
+
+
+ Rules to determine data placement for a pool.
+
+
+
+
+
+
+ Ceph Specific Terms
+ Calamari
+
+
+ A management and monitoring system for Ceph storage cluster. It provides
+ a Web user interface that makes Ceph cluster monitoring simple.
+
+
+
+ Ceph Storage Cluster
+
+
+ The core set of storage software which stores the user’s data. Such a
+ set consists of Ceph monitors and OSDs.
+
+
+ Also known as the Ceph Object Store.
+
+
+
+ RADOS Gateway
+
+
+ The S3/Swift gateway component for Ceph Object Store.
+
+
+
+
+
+
+ Salt State (SLS) File Example
+
+ This example shows a cluster configuration split into several SLS files. You
+ can customize them to build up a cluster with Salt. Note that you need to
+ do local customization to the SLS files, such as supplying suitable disk
+ device names, host names and IP addresses valid for your network environment.
+ Lines beginning with '#' are comments.
+
+
+ The structure is as follows:
+
+
+├── ses
+│ ├── ceph
+│ │ ├── ceph.conf
+│ │ └── init.sls
+│ ├── common
+│ │ ├── admin_key.sls
+│ │ ├── mds_key.sls
+│ │ ├── mon_key.sls
+│ │ ├── osd_key.sls
+│ │ └── rgw_key.sls
+│ ├── mds
+│ │ └── init.sls
+│ ├── mon
+│ │ └── init.sls
+│ ├── osd
+│ │ └── init.sls
+│ └── rgw
+│ └── init.sls
+└── top.sls
+
+
+ top.sls
+
+ The configuration toplevel file top.sls includes other
+ SLS files from the subdirectories of the ses directory,
+ depending on which component is used.
+
+base:
+ '*':
+ - ses.ceph
+ '*mon*':
+ - ses.mon
+ '*osd*':
+ - ses.osd
+ '*mds*':
+ - ses.mds
+ '*rgw*':
+ - ses.rgw
+
+
+ ses/ceph/init.sls
+# We need to install ceph and its configuration library
+packages:
+ pkg.installed:
+ - names:
+ - ceph
+ - python-ceph-cfg
+
+# We need a ceph configuration file before we start.
+# Note:
+# - The file name is dependent on the cluster name:
+# /etc/ceph/${CLUSTER_NAME}.conf
+/etc/ceph/ceph.conf:
+ file:
+ - managed
+ - source:
+# Where to get the source file will have to be customized to your environment.
+ - salt://ses/ceph/ceph.conf
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: True
+ - require:
+ - pkg: packages
+
+
+ ses/ceph/ceph.conf
+[global]
+fsid = eaac9695-4265-4ca8-ac2a-f3a479c559b1
+mon_initial_members = osd-mon-node0, mon-osd-mds-node1, mon-osd-node2
+mon_host = 192.168.100.168,192.168.100.223,192.168.100.130
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+filestore_xattr_use_omap = true
+
+
+ Create the admin key and the keys for specific service types with
+
+salt '*' ceph.keyring_create type=<keyring_type>
+
+ before saving them. Customize the 'secret' value for your site using the
+ values from the previous create command. All keys must be saved before the
+ monitors are created as this has a side effect of creating keys not managed
+ by Salt.
+
+
+ ses/common/admin_key.sls
+keyring_admin_save:
+ module.run:
+ - name: ceph.keyring_save
+ - kwargs: {
+ 'keyring_type' : 'admin',
+ 'secret' : 'AQBR8KhWgKw6FhAAoXvTT6MdBE+bV+zPKzIo6w=='
+ }
+ - require:
+ - sls: ses.ceph
+
+
+ ses/common/mds_key.sls
+keyring_mds_save:
+ module.run:
+ - name: ceph.keyring_save
+ - kwargs: {
+ 'keyring_type' : 'mds',
+ 'secret' : 'AQBR8KhWgKw6FhAAoXvTT6MdBE+bV+zPKzIo6w=='
+ }
+ - require:
+ - sls: ses.ceph
+
+
+ ses/common/mon_key.sls
+keyring_mon_save:
+ module.run:
+ - name: ceph.keyring_save
+ - kwargs: {
+ 'keyring_type' : 'mon',
+ 'secret' : 'AQBR8KhWgKw6FhAAoXvTT6MdBE+bV+zPKzIo6w=='
+ }
+ - require:
+ - sls: ses.ceph
+
+
+ ses/common/osd_key.sls
+keyring_osd_save:
+ module.run:
+ - name: ceph.keyring_save
+ - kwargs: {
+ 'keyring_type' : 'osd',
+ 'secret' : 'AQBR8KhWgKw6FhAAoXvTT6MdBE+bV+zPKzIo6w=='
+ }
+ - require:
+ - sls: ses.ceph
+
+
+ ses/common/rgw_key.sls
+keyring_rgw_save:
+ module.run:
+ - name: ceph.keyring_save
+ - kwargs: {
+ 'keyring_type' : 'rgw',
+ 'secret' : 'AQBR8KhWgKw6FhAAoXvTT6MdBE+bV+zPKzIo6w=='
+ }
+ - require:
+ - sls: ses.ceph
+
+
+ ses/mds/init.sls
+include:
+ - ses.ceph
+ - ses.common.mds_key
+
+keyring_mds_auth_add:
+ module.run:
+ - name: ceph.keyring_mds_auth_add
+ - require:
+ - module: keyring_mds_save
+ - ceph: cluster_status
+
+mds_create:
+ module.run:
+ - name: ceph.mds_create
+ - kwargs: {
+ name: mds.{{ grains['machine_id'] }},
+ port: 1000,
+ addr: {{ grains['fqdn_ip4'] }}
+ }
+ - require:
+ - module: keyring_mds_auth_add
+
+
+ ses/mon/init.sls
+include:
+ - ses.ceph
+ - ses.common.admin_key
+ - ses.common.mon_key
+
+mon_create:
+ module.run:
+ - name: ceph.mon_create
+ - require:
+ - module: keyring_admin_save
+ - module: keyring_mon_save
+
+cluster_status:
+ ceph.quorum:
+ - require:
+ - module: mon_create
+
+
+ ses/osd/init.sls
+include:
+ - ses.ceph
+ - ses.common.osd_key
+
+keyring_osd_auth_add:
+ module.run:
+ - name: ceph.keyring_osd_auth_add
+ - require:
+ - module: keyring_osd_save
+ - ceph: cluster_status
+
+# Prepare disks for OSD use
+
+prepare_vdb:
+ module.run:
+ - name: ceph.osd_prepare
+ - kwargs: {
+ osd_dev: /dev/vdb
+ }
+ - require:
+ - module: keyring_osd_auth_add
+
+# Activate OSD's on prepared disks
+
+activate_vdb:
+ module.run:
+ - name: ceph.osd_activate
+ - kwargs: {
+ osd_dev: /dev/vdb
+ }
+
+
+ ses/rgw/init.sls
+include:
+ - ses.ceph
+ - ses.common.rgw_key
+
+
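+ Once the SLS files are in place on the Salt master, you would typically
+ apply them to the minions with a highstate run (a sketch):
+
+salt '*' state.highstate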
+
+ Example Procedure of Manual Ceph Installation
+
+ The following procedure shows the commands that you need to install Ceph
+ storage cluster manually.
+
+
+
+
+ Generate the key secrets for the Ceph services you plan to run. You can
+ use the following command to generate it:
+
+python -c "import os ; import struct ; import time; import base64 ; \
+ key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; \
+ print base64.b64encode(header + key)"
+
+
+
+ Add the keys to the related keyrings. First for
+ client.admin, then for monitors,
+ and then other related services, such as OSD, RADOS Gateway, or MDS:
+
+ceph-authtool -n client.admin \
+ --create-keyring /etc/ceph/ceph.client.admin.keyring \
+ --cap mds 'allow *' --cap mon 'allow *' --cap osd 'allow *'
+ceph-authtool -n mon. \
+ --create-keyring /var/lib/ceph/bootstrap-mon/ceph-osceph-03.keyring \
+ --set-uid=0 --cap mon 'allow *'
+ceph-authtool -n client.bootstrap-osd \
+ --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \
+ --cap mon 'allow profile bootstrap-osd'
+ceph-authtool -n client.bootstrap-rgw \
+ --create-keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring \
+ --cap mon 'allow profile bootstrap-rgw'
+ceph-authtool -n client.bootstrap-mds \
+ --create-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring \
+ --cap mon 'allow profile bootstrap-mds'
+
+
+
+ Create a monmap—a database of all monitors in a cluster:
+
+monmaptool --create --fsid eaac9695-4265-4ca8-ac2a-f3a479c559b1 \
+ /tmp/tmpuuhxm3/monmap
+monmaptool --add osceph-02 192.168.43.60 /tmp/tmpuuhxm3/monmap
+monmaptool --add osceph-03 192.168.43.96 /tmp/tmpuuhxm3/monmap
+monmaptool --add osceph-04 192.168.43.80 /tmp/tmpuuhxm3/monmap
+
+
+
+ Create a new keyring and import keys from the admin and monitors' keyrings
+ there. Then use them to start the monitors:
+
+ceph-authtool --create-keyring /tmp/tmpuuhxm3/keyring \
+ --import-keyring /var/lib/ceph/bootstrap-mon/ceph-osceph-03.keyring
+ceph-authtool /tmp/tmpuuhxm3/keyring \
+ --import-keyring /etc/ceph/ceph.client.admin.keyring
+sudo -u ceph ceph-mon --mkfs -i osceph-03 \
+ --monmap /tmp/tmpuuhxm3/monmap --keyring /tmp/tmpuuhxm3/keyring
+systemctl restart ceph-mon@osceph-03
+
+
+
+ Check the monitors state in systemd:
+
+systemctl show --property ActiveState ceph-mon@osceph-03
+
+
+
+ Check if Ceph is running and reports the monitor status:
+
+ceph --cluster=ceph \
+ --admin-daemon /var/run/ceph/ceph-mon.osceph-03.asok mon_status
+
+
+
+ Check the specific services' status using the existing keys:
+
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin -f json-pretty status
+[...]
+ceph --connect-timeout 5 \
+ --keyring /var/lib/ceph/bootstrap-mon/ceph-osceph-03.keyring \
+ --name mon. -f json-pretty status
+
+
+
+ Import keyring from existing Ceph services and check the status:
+
+ceph auth import -i /var/lib/ceph/bootstrap-osd/ceph.keyring
+ceph auth import -i /var/lib/ceph/bootstrap-rgw/ceph.keyring
+ceph auth import -i /var/lib/ceph/bootstrap-mds/ceph.keyring
+ceph --cluster=ceph \
+ --admin-daemon /var/run/ceph/ceph-mon.osceph-03.asok mon_status
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin -f json-pretty status
+
+
+
+ Prepare disks/partitions for OSDs, using the XFS file system:
+
+ceph-disk -v prepare --fs-type xfs --data-dev --cluster ceph \
+ --cluster-uuid eaac9695-4265-4ca8-ac2a-f3a479c559b1 /dev/vdb
+ceph-disk -v prepare --fs-type xfs --data-dev --cluster ceph \
+ --cluster-uuid eaac9695-4265-4ca8-ac2a-f3a479c559b1 /dev/vdc
+[...]
+
+
+
+ Activate the partitions:
+
+ceph-disk -v activate --mark-init systemd --mount /dev/vdb1
+ceph-disk -v activate --mark-init systemd --mount /dev/vdc1
+
+
+
+ For SUSE Enterprise Storage version 2.1 and earlier, create the default pools:
+
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .users.swift 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .intent-log 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .rgw.gc 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .users.uid 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .rgw.control 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .users 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .usage 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .log 16 16
+ceph --connect-timeout 5 --keyring /etc/ceph/ceph.client.admin.keyring \
+ --name client.admin osd pool create .rgw 16 16
+
+
+
+ Create the RADOS Gateway instance key from the bootstrap key:
+
+ceph --connect-timeout 5 --cluster ceph --name client.bootstrap-rgw \
+ --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create \
+ client.rgw.0dc1e13033d2467eace46270f0048b39 osd 'allow rwx' mon 'allow rw' \
+ -o /var/lib/ceph/radosgw/ceph-rgw.rgw_name/keyring
+
+
+
+
+ Enable and start RADOS Gateway:
+
+systemctl enable ceph-radosgw@rgw.rgw_name
+systemctl start ceph-radosgw@rgw.rgw_name
+
+
+
+ Optionally, create the MDS instance key from the bootstrap key, then enable
+ and start it:
+
+ceph --connect-timeout 5 --cluster ceph --name client.bootstrap-mds \
+ --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create \
+ mds.mds.rgw_name osd 'allow rwx' mds allow mon \
+ 'allow profile mds' \
+ -o /var/lib/ceph/mds/ceph-mds.rgw_name/keyring
+systemctl enable ceph-mds@mds.rgw_name
+systemctl start ceph-mds@mds.rgw_name
+
+
+
+
+ Documentation Updates
+
+
+ tbazant@suse.com
+ editing
+
+
+
+
+ SES4
+
+
+
+ This chapter lists content changes for this document since the initial
+ release of SUSE Enterprise Storage 1.
+
+
+ The document was updated on the following dates:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ February, 2017 (Release of SUSE Enterprise Storage 4 Maintenance Update 1)
+
+
+
+ General Updates
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Bugfixes
+
+
+
+
+ Added the admin node to the upgrade workflow in
+ ().
+
+
+
+
+ Rewrote to avoid globbing
+ services
+ ().
+
+
+
+
+
+
+
+
+ December, 2016 (Release of SUSE Enterprise Storage 4)
+
+
+
+ General Updates
+
+
+
+
+
+ Restructured the whole document introducing DocBook 'parts' to group
+ related chapters.
+
+
+
+
+ Introduced (Fate #321085).
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Replaced the old upgrade procedure with
+ .
+
+
+
+
+
+
+
+
+
+ Bugfixes
+
+
+
+
+ Extended to include more
+ detailed information based on the SUSE OpenStack Cloud documentation
+ ().
+
+
+
+
+ Increased the memory requirement for an OSD in
+
+ ().
+
+
+
+
+ Improved the each node preparation section
+
+ ().
+
+
+
+
+ Improved
+ ().
+
+
+
+
+ Improved
+ ().
+
+
+
+
+ Improved
+ ().
+
+
+
+
+ Improved
+ ().
+
+
+
+
+
+
+
+
+ June, 2016 (Release of SUSE Enterprise Storage 3)
+
+
+
+ General Updates
+
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added (Fate #320602).
+
+
+
+
+ Added .
+
+
+
+
+ Added (Fate #318586).
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Improved network recommendation tip in
+ .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+
+
+
+
+
+ Bugfixes
+
+
+
+
+ Improved the procedure to set up hot-storage and cold-storage in
+ and added
+ .
+ ().
+
+
+
+
+ Added a command to install Ceph on the MDS server in
+
+ ().
+
+
+
+
+ Added a tip referring to more information about using existing
+ partitions for OSDs in
+
+ ().
+
+
+
+
+ Reordered snapshots-related sections and created
+ with
+ and
+
+ ().
+
+
+
+
+ Mixing installation methods is not supported in
+
+ ().
+
+
+
+
+ Format 1 is no longer the default (in favor of the format 2) when
+ creating RBD volumes in
+ ().
+
+
+
+
+ Added note about increasing the ruleset number in
+
+ ().
+
+
+
+
+ Specified which clients are able to migrate to optimal tunables
+ ().
+
+
+
+
+ Split into
+ and added configuration
+ options description
+ ().
+
+
+
+
+ Added
+ ().
+
+
+
+
+ Updated minimal recommendations in
+ ().
+
+
+
+
+ Fixed support information on snapshot cloning in
+
+ ().
+
+
+
+
+ Improved 'bucket' explanation in
+ ().
+
+
+
+
+ Clarified non-mixing workload phrase in
+ ().
+
+
+
+
+ Updated RAM requirement for OSDs in
+ ().
+
+
+
+
+ Fixed hit_set_count default value in
+ and added note with external link
+ in
+ ().
+
+
+
+
+ Fixed and improved ceph-deploy command line in
+
+ ().
+
+
+
+
+ Updated several places to match the current Ceph release in
+ , , and
+
+ ().
+
+
+
+
+ In , added (explanation) of the
+ following poll parameters: hashpspool,
+ expected_num_objects,
+ cache_target_dirty_high_ratio,
+ hit_set_grade_decay_rate,
+ hit_set_grade_search_last_n,
+ fast_read, scrub_min_interval,
+ scrub_max_interval,
+ deep_scrub_interval, nodelete,
+ nopgchange, nosizechange,
+ noscrub, nodeep-scrub.
+ ().
+
+
+
+
+ Added
+ ().
+
+
+
+
+ Added software pattern selection screens in
+
+ ().
+
+
+
+
+ Removed RAID recommendations for OSD disks placement in
+ and
+ ().
+
+
+
+
+ Updated the default set of CRUSH map's buckets in
+
+ ().
+
+
+
+
+ Removed 'data' and 'metadata' pools, no longer the default
+ ().
+
+
+
+
+ Fixed trademarked 3rd party products names and replaced with entities
+ in
+ ().
+
+
+
+
+ Updated RADOS Gateway service name to
+ ceph-radosgw@radosgw.gateway_name
+ across affected sections
+ ().
+
+
+
+
+ Added
+ ().
+
+
+
+
+ Recommended to use sudo with the
+ ceph command in
+
+ ().
+
+
+
+
+ Changed the default min_size value in
+
+ ().
+
+
+
+
+ Fixed the master:dns_name_of_salt_master option in
+
+ ().
+
+
+
+
+ Extended and improved the OSD removal procedure in
+
+ ().
+
+
+
+
+ Prefixed RADOS Gateway hosts with rgw. in
+
+ ().
+
+
+
+
+ Changed the subcommand position in the ceph-deploy calamari
+ connect command in
+ ().
+
+
+
+
+ Added information about perpetually stuck PGs in
+
+ ().
+
+
+
+
+
+
+
+
+ January, 2016 (Release of SUSE Enterprise Storage 2.1)
+
+
+
+ General Updates
+
+
+
+
+
+ Removed Btrfs as it is not supported as of SUSE Enterprise Storage 2.
+
+
+
+
+ Moved from
+ to
+ so that the information provided follows the right order.
+
+
+
+
+ Added .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Added .
+
+
+
+
+ Added a short description of the option in
+ the ceph-deploy calamari connect command in
+ .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Removed the Checking MDS Status section in
+ as MDS is not covered yet.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Added .
+
+
+
+
+
+
+
+
+
+ Bugfixes
+
+
+
+
+ Added systemctl stop cthulhu.service when clearing a
+ stale cluster
+ ().
+
+
+
+
+ Fixed a typo in
+ ()
+
+
+
+
+ Fixed a typo in the ceph-deploy rgw command syntax
+ ().
+
+
+
+
+ Restructured the whole , added
+ ,
+ ,
+ , and
+
+ ().
+
+
+
+
+ Added links to the extension installation in
+ and
+
+ ().
+
+
+
+
+ Renamed 'Monitoring a cluster' to 'Determining cluster state' in
+
+ ().
+
+
+
+
+ Added .
+ ().
+
+
+
+
+ Advised the customers to manually prevent Apache from listening on the
+ default port 80 if they prefer a different port number
+ ().
+
+
+
+
+ Removed FastCGI occurrences and file references in
+
+ ().
+
+
+
+
+ Added ceph-deploy way of installing/migrating RADOS Gateway
+ instances
+ ().
+
+
+
+
+ Moved the step about checking the firewall status to
+
+ ().
+
+
+
+
+ Inserted information about the need for correct network setup for each
+ node in the procedure in
+
+ ().
+
+
+
+
+ Extended the information about 'zapping' the previously used disk while
+ entirely erasing its content in
+
+ ().
+
+
+
+
+ Moved the tip on the non-default cluster name to the end of
+
+ ().
+
+
+
+
+ Added tip on SSH alias in
+
+ ().
+
+
+
+
+ Removed the last complex zypper rm command from
+ Ceph cleaning stage in
+
+ ().
+
+
+
+
+ Suggest to install the romana package instead
+ of calamari-clients in
+
+ ().
+
+
+
+
+ Swapped the order of chapters and
+
+ ().
+
+
+
+
+ Fixed Apache support information
+ ().
+
+
+
+
+ Either erased or made it optional to use ceph-deploy osd
+ activate in
+ ,
+ , and
+
+ ().
+
+
+
+
+
+
+
+
+ October, 2015 (Release of SUSE Enterprise Storage 2)
+
+
+
+ General
+
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Extended the tip on cleaning the previous Calamari installation in
+ .
+
+
+
+
+ Fixed the blocking firewall in the installation procedure in
+
+ ().
+
+
+
+
+ Shifted the cluster health check in the installation procedure in
+
+ ().
+
+
+
+
+ Added a tip on disabling requiretty in
+
+ ().
+
+
+
+
+ Fixed the pool names in
+
+ ().
+
+
+
+
+ Added ceph to be installed along with
+ ceph-deploy in
+
+ ().
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Fixed the systemctl radosgw command in
+
+ ().
+
+
+
+
+ Replaced Apache with the embedded Civetweb, mainly in
+ .
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added a tip on preventing for full OSDs in
+
+ ().
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added.
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+ Added .
+
+
+
+
+
+
+
+
+
diff --git a/geekodoc/tests/book.suma-cloud.xml b/geekodoc/tests/book.suma-cloud.xml
new file mode 100644
index 0000000..02df021
--- /dev/null
+++ b/geekodoc/tests/book.suma-cloud.xml
@@ -0,0 +1,403 @@
+
+
+
+
+ SUSE Manager in the Public Cloud
+ SUSE Manager Server and SUSE Manager Proxy in the Public
+ Cloud
+
+ 3
+ SUSE Manager
+
+
+
+
+ SUSE Manager delivers best-in-class Linux server management
+ capabilities. For detailed information about the product please refer to
+ the SUSE
+ Manager documentation.
+ The SUSE Manager Server and SUSE Manager Proxy images published by
+ SUSE in selected Public Cloud environments are provided as Bring Your Own
+ Subscription (BYOS) images. SUSE Manager Server instances need to be
+ registered with the SUSE Customer Center (SCC). Subscriptions of
+ SUSE Manager Proxy instances are handled through their parent SUSE Manager
+ Server. After an instance is launched, SUSE Manager needs to be set up and
+ configured following the procedure in the SUSE Manager documentation.
+
+
+
+
+ Instance Requirements
+ Select an instance size that meets the system requirements as
+ documented in the SUSE Manager documentation.
+
+
+ Minimal main memory: >12G
+
+
+ The SUSE Manager setup procedure performs a Forward-confirmed
+ reverse DNS lookup. This must succeed in order for the setup procedure to
+ complete successfully and for SUSE Manager to operate as expected. Therefore
+ it is important that the hostname and IP configuration be performed prior
+ to running the SUSE Manager setup procedure.
+
+
+ SUSE Manager Server and SUSE Manager Proxy instances are expected
+ to run in a network configuration that provides you control over
+ DNS entries and that is shielded from the Internet at large. Within
+ this network configuration DNS (Domain Name Service) resolution
+ must be provided, such that hostname -f returns
+ the FQDN (Fully Qualified Domain Name). The DNS resolution is not only
+ important for the SUSE Manager Server procedure but is also important
+ when clients are configured to be managed via SUSE Manager. Configuring
+ DNS is Cloud Framework dependent, please refer to the cloud service
+ provider documentation for detailed instructions.
+
+
+ Minimal free disk space for SUSE Manager: 15 GB.
+ For Public Cloud instances we recommend that the repositories
+ and the SUSE Manager Server database, and respectively the SUSE Manager Proxy
+ squid cache, be located on an external virtual disk. The details for
+ this setup are provided in
+ .
+ Storing the database and the repositories on an external virtual
+ disk ensures that the data is not lost should the instance need to be
+ terminated for some reason.
+
+
+ Please ensure that the selected instance type matches the requirements
+ listed above. Although we recommend that the database and the repositories
+ are stored on a separate device, we still recommend setting the root
+ volume size of the instance to 20 GB.
+
+
+ Setup
+
+
+ Run an instance of the SUSE Manager Server or SUSE Manager Proxy
+ image as published by SUSE.
+ The images are identifiable by the suse, manager, server or proxy,
+ and byos keywords in each public cloud environment.
+ The SUSE Manager instance must run in a network access restricted
+ environment such as the private subnet of a VPC or with an appropriate
+ firewall setting such that it can only be accessed by machines in the IP
+ ranges you use. A generally accessible SUSE Manager instance violates the
+ terms of the SUSE Manager EULA. Access to the web interface of SUSE
+ Manager requires https.
+
+
+ Set up the hostname as follows:
+
+ SUSE Manager requires a stable and reliable hostname and does not
+ take kindly to hostname changes. All commands provided need to be
+ executed as the root user.
+
+
+ Disable hostname setup in the dhcp configuration file:
+ /etc/sysconfig/network/dhcp
+
+ DHCLIENT_SET_HOSTNAME="no"
+
+
+
+ Set the hostname to a name of your choice. Please note
+ it is important to provide the system name
+ and not the fully qualified hostname
+ to the hostnamectl command.
+
+ It is expected that the Fully Qualified Domain Name (FQDN) is
+ set by the cloud framework; for example if
+ cloud_instance.cloud.net is the
+ fully qualified name, then
+ cloud_instance
+ is the system name and cloud.net
+ is the domain name.
+
+ In the following example we will change the system name to
+ suma
+ $ hostnamectl set-hostname suma
+ The fully qualified hostname is now locally set to
+ suma.cloud.net. Once the hostname
+ is set locally a DNS entry needs to be created in your network
+ environment such that domain name resolution works properly.
+
+ Alternatively, to force proper resolution of the name on the
+ SUSE Manager system, you may alter the /etc/hosts
+ file as follows:
+ $ echo "${local_address} suma.cloud.net suma" >> /etc/hosts
+ The current value for the local_address
+ can be obtained from the public cloud Web console or from within
+ a terminal session as follows:
+
+
+ Obtain local ip address from within Amazon EC2 instance
+ $ ec2metadata --local-ipv4
+
+
+ Obtain local ip address from within Google Compute Engine instance
+ $ gcemetadata --query instance --network-interfaces --ip
+
+
+ Obtain local ip address from within Microsoft Azure instance
+ $ azuremetadata --internal-ip
+
+
+
+ Note that forcing DNS resolution to work by modifying
+ the /etc/hosts file will allow the
+ yast2 susemanager_setup procedure to work. However,
+ if DNS resolution is not properly configured, every client that is
+ to be managed via this SUSE Manager instance must
+ also receive the same modification to /etc/hosts.
+
+
+ One more aspect of hostname resolution is the
+ /etc/resolv.conf file. Depending on the order of
+ your setup, that is, if you started the SUSE Manager instance prior to
+ setting up DNS services, the file may not contain the appropriate
+ search directive. Double-check
+ that the proper search directive exists in
+ /etc/resolv.conf. In our example the directive
+ would be search cloud.net. If the
+ directive is missing, add it to the file.
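+ A minimal way to verify the directive (using the example domain
+ cloud.net) is:
+ $ grep '^search' /etc/resolv.conf
+ search cloud.net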
+
+
+ Reference information for the DNS record update is provided
+ below.
+
+
+
+ To update the DNS records for the instance within the
+ DNS service of your network environment, refer to the cloud service
+ provider documentation for detailed instructions:
+
+
+
+ DNS setup on Amazon EC2
+
+
+
+ DNS setup on Google Compute Engine
+
+
+
+ DNS setup on Microsoft Azure
+
+
+
+
+
+
+ Configure SUSE Manager
+
+
+ If you run a SUSE Manager Server instance, run YaST as shown
+ below after the instance is launched, the external storage is
+ attached and prepared according to
+ , and the DNS resolution
+ is set up as described earlier.
+ $ /sbin/yast2 susemanager_setup
+ Note that the setup of SUSE Manager from this point forward does
+ not differ from the documentation in the SUSE
+ Manager Guide.
+ The SUSE Manager setup procedure in YaST is designed as a one-pass
+ process with no rollback or cleanup capability. Therefore, if the
+ setup procedure is interrupted or ends with an error, do not
+ attempt a recovery: repeating the setup process or
+ attempting to manually "fix" the configuration is likely to fail
+ and result in a broken SUSE Manager installation. In case of any
+ errors we recommend starting a new instance in order to run a
+ fresh setup procedure on a clean system.
+
+ If you are prompted with a message that there is not enough space
+ available for the setup of SUSE Manager, verify that your root volume is
+ at least 20 GB and double-check that the instructions in had the desired effects.
+ SUSE Manager Server for the Public Cloud comes with a bootstrap
+ data module pre-installed that contains optimized package lists for
+ bootstrapping instances started from SUSE Linux Enterprise images
+ published by SUSE. If you intend to register such an instance,
+ make sure when creating the bootstrap repository you run the
+ mgr-create-bootstrap-repo script as follows:
+
+ $ mgr-create-bootstrap-repo --datamodule=mgr_pubcloud_bootstrap_data -c SLE-12-SP1-x86_64
+ The above example creates a bootstrap repository suitable for SUSE
+ Linux Enterprise Server 12 SP1 instances. See
+ Creating the SUSE Manager Tools Repository for more information on bootstrapping.
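+ If you are unsure which distribution labels are available on your
+ server, the script can list them; the exact option may differ between
+ versions, so check its help output (the -l option shown here is an
+ assumption):
+ $ mgr-create-bootstrap-repo -l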
+
+ Prior to registering instances started from on-demand images,
+ remove the following packages from the instance to be registered:
+
+
+ cloud-regionsrv-client
+
+
+ For Amazon EC2
+ regionServiceClientConfigEC2
+ regionServiceCertsEC2
+
+
+ For Google Compute Engine
+ cloud-regionsrv-client-plugin-gce
+ regionServiceClientConfigGCE
+ regionServiceCertsGCE
+
+
+ For Microsoft Azure
+ regionServiceClientConfigAzure
+ regionServiceCertsAzure
+
+
+ If these packages are not removed, the repositories provided by
+ SUSE Manager may interfere with the repositories
+ provided by the SUSE-operated update infrastructure.
+
+ Additionally, remove the line from the
+ /etc/hosts file that contains the
+ susecloud.net reference.
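+ As an illustration, on an Amazon EC2 instance the cleanup could look
+ like this (package names as listed above; adapt the list to your
+ cloud framework):
+ $ zypper rm cloud-regionsrv-client regionServiceClientConfigEC2 regionServiceCertsEC2
+ $ sed -i '/susecloud.net/d' /etc/hosts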
+
+
+ If you run a SUSE Manager Proxy instance
+ Launch the instance, optionally with external storage configured.
+ If you use external storage (recommended), prepare it according to
+ . It is recommended but
+ not required to prepare the storage before configuring SUSE Manager proxy,
+ as the suma-storage script will migrate any existing cached data to the
+ external storage. After preparing the instance, register the system
+ with the parent SUSE Manager, which could be a SUSE Manager Server or
+ another SUSE Manager Proxy. See the SUSE Manager Proxy Setup guide for details. Once registered,
+ run
+ $ /usr/sbin/configure-proxy.sh
+ to configure your SUSE Manager Proxy instance.
+
+
+
+
+ After the completion of the configuration step, SUSE Manager should
+ be functional and running. For SUSE Manager Server, the setup process
+ created an administrator user with the following user name:
+
+ User name: admin
+
+
+ Account credentials for admin user
+
+
+
+
+ Amazon EC2
+
+
+ Google Compute Engine
+
+
+ Microsoft Azure
+
+
+
+
+
+
+
+ Instance-ID
+
+
+
+
+ Instance-ID
+
+
+
+
+ Instance-Name-suma
+
+
+
+
+
+
+ The current value for the Instance-ID (or,
+ in the case of the Azure Cloud, the Instance-Name) can
+ be obtained from the public cloud Web console or from within a terminal
+ session as follows:
+
+
+ Obtain instance id from within Amazon EC2 instance
+ $ ec2metadata --instance-id
+
+
+ Obtain instance id from within Google Compute Engine instance
+ $ gcemetadata --query instance --id
+
+
+ Obtain instance name from within Microsoft Azure instance
+ $ azuremetadata --instance-name
+
+
+ After logging in through the SUSE Manager Server Web UI, change the default password.
+ SUSE Manager Proxy does not have an administration Web interface
+ of its own. It can be managed through its parent SUSE Manager Server.
+
+
+
+ Using Separate Storage Volume
+ We recommend that the repositories and the database for SUSE Manager
+ be stored on a virtual storage device. This best practice will avoid data
+ loss in cases where the SUSE Manager instance may need to be terminated.
+ These steps must be performed prior to running the YaST SUSE Manager setup
+ procedure.
+
+
+ Provision a disk device in the public cloud environment; refer to
+ the cloud service provider documentation for detailed instructions. The
+ size of the disk depends on the number of distributions and channels
+ you intend to manage with SUSE Manager. For sizing information refer to
+ SUSE Manager sizing examples. A rule of thumb is 25 GB per
+ distribution per channel.
+
+
+ Once attached, the device appears as a Unix device node in your
+ instance. For the following command to work, this device node name is
+ required. In many cases the attached storage appears as /dev/sdb. To check which disk devices
+ exist on your system, call the following command:
+ $ hwinfo --disk | grep -E "Device File:"
+
+
+ With the device name at hand, the process of re-linking the
+ directories in the filesystem SUSE Manager uses to store data is handled
+ by the suma-storage script. In the following example we use
+ /dev/sdb as the device name.
+ $ /usr/bin/suma-storage /dev/sdb
+ After the call, all database and repository files used by SUSE
+ Manager Server are moved to the newly created XFS-based storage. If
+ your instance is a SUSE Manager Proxy, the script instead moves the Squid
+ cache, which caches the software packages, to the newly created storage.
+ The XFS partition is mounted below the path
+ /manager_storage.
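+ To verify that the migration took place, you can check that the new
+ partition is mounted and holds the data (a minimal check; the mount
+ point is created by the script):
+ $ df -h /manager_storage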
+
+
+ Create an entry in /etc/fstab (optional)
+ Different cloud frameworks treat the attachment of external storage
+ devices differently at instance boot time. Please refer to the cloud
+ environment documentation for guidance about the fstab entry.
+ If your cloud framework recommends adding an fstab entry, add the
+ following line to the /etc/fstab file.
+ /dev/sdb1 /manager_storage xfs defaults 1 1
+
+
+
+
+
+ Registration of Cloned Systems
+ Cloud environments generally support the creation of new images from a
+ running instance. Images created using this functionality are clones and
+ contain data that causes SUSE Manager to treat both
+ systems as if they were a single system. To let SUSE Manager handle
+ instances of cloned images as new registrations, apply the procedure
+ outlined in the following wiki to an instance of the cloned image.
+
+
+
+
+
diff --git a/geekodoc/tests/book.suma.reference.xml b/geekodoc/tests/book.suma.reference.xml
new file mode 100644
index 0000000..36b821b
--- /dev/null
+++ b/geekodoc/tests/book.suma.reference.xml
@@ -0,0 +1,12235 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Reference Manual
+
+
+ SUSE Manager
+ 3
+
+
+
+
+
+ Copyright ©
+
+
+ SUSE LLC
+
+
+ Copyright © 2011-2014 Red Hat, Inc.
+
+
+ The text of and illustrations in this document are licensed by Red Hat
+ under a Creative Commons Attribution-Share Alike 3.0 Unported license
+ ("CC-BY-SA"). An explanation of CC-BY-SA is available at
+ . In
+ accordance with CC-BY-SA, if you distribute this document or an adaptation
+ of it, you must provide the URL for the original version.
+
+
+ This document is an adaptation of original works found at
+
+ and
+
+
+ and
+ .
+
+
+ Red Hat, as a licensor of these documents, waives the right to enforce,
+ and agrees not to assert, Section 4d of CC-BY-SA to the fullest extent
+ permitted by applicable law.
+
+
+ Red Hat, Red Hat Enterprise Linux, the Shadowman logo, JBoss, MetaMatrix,
+ Fedora, the Infinity Logo, and RHCE are trademarks of Red Hat, Inc.,
+ registered in the United States and other countries. Linux® is the
+ registered trademark of Linus Torvalds in the United States and other
+ countries. Java® is a registered trademark of Oracle and/or its
+ affiliates. XFS® is a trademark of Silicon Graphics International
+ Corp. or its subsidiaries in the United States and/or other countries.
+ MySQL® is a registered trademark of MySQL AB in the United States,
+ the European Union and other countries. All other trademarks are the
+ property of their respective owners.
+
+
+ For Novell trademarks, see the Novell Trademark and Service Mark list
+ .
+ Linux* is a registered trademark of Linus Torvalds. All other third party
+ trademarks are the property of their respective owners. A trademark symbol
+ (®, ™ etc.) denotes a Novell trademark; an asterisk (*)
+ denotes a third party trademark.
+
+
+ All information found in this book has been compiled with utmost attention
+ to detail. However, this does not guarantee complete accuracy. Neither
+ Novell, Inc., SUSE LINUX Products GmbH, the authors, nor the translators
+ shall be held liable for possible errors or the consequences thereof.
+
+
+
+
+
+ About This Guide
+
+
+ SUSE® Manager enables you to efficiently manage a set of Linux systems
+ and keep them up-to-date. It provides automated and cost-effective
+ software management, asset management, system provisioning, and monitoring
+ capabilities. SUSE Manager is compatible with Red Hat Satellite Server and
+ offers seamless management of both SUSE® Linux Enterprise and Red Hat Enterprise Linux client
+ systems.
+
+
+ This manual explains the features of the Web interface and is intended for
+ SUSE Manager administrators and administrators with restricted roles for
+ specific tasks.
+ On certain topics we also provide background information,
+ while some chapters contain links to additional documentation resources.
+ The latter include additional documentation available on the installed
+ system as well as documentation on the Internet.
+
+
+ For an overview of the documentation available for your product and the
+ latest documentation updates, refer to
+ or to
+ the following section.
+
+
+ HTML versions of the manuals are also available from the
+ Help tab of the SUSE Manager Web interface.
+
+
+ Obtaining the Release Notes
+
+ Although this manual reflects the most current information possible, read
+ the SUSE Manager Release Notes for information
+ that may not have been available prior to the finalization of the
+ documentation. The notes can be found at
+ .
+
+
+
+ Available Documentation
+
+ help
+
+ Novell/SUSE manuals
+
+ Novell/SUSE manuals
+
+
+ The following manuals are available on this product:
+
+
+
+
+ ↑“Getting Started”
+
+
+
+ Lists installation scenarios and example topologies for different
+ SUSE Manager setups. Guides you step by step through the installation,
+ setup and basic configuration of SUSE Manager. Also contains detailed
+ information about SUSE Manager maintenance and troubleshooting.
+
+
+
+
+
+
+
+ Reference documentation that covers the Web interface to SUSE Manager.
+
+
+
+
+
+
+ HTML versions of the product manuals can be found in the installed system
+ under /usr/share/doc/manual. Find the latest
+ documentation updates at
+ where you can
+ download PDF or HTML versions of the manuals for your product.
+ emap:
+ there's only a link to the general SUSE documentation site. Might be
+ more convenient for customers to go directly to the SUSE site
+ instead of detouring via novell.com.
+
+
+
+ Feedback
+
+
+ Several feedback channels are available:
+
+
+
+
+ Bugs and Enhancement Requests
+
+
+ For services and support options available for your product, refer to
+ .
+
+
+
+ To report bugs for a product component, go to
+ , log in, and
+ click Create New.
+
+
+
+
+ User Comments
+
+
+ We want to hear your comments about and suggestions for this manual and
+ the other documentation included with this product. Use the User
+ Comments feature at the bottom of each page in the online documentation
+ or go to and
+ enter your comments there.
+
+
+
+
+ Mail
+
+
+ For feedback on the documentation of this product, you can also send a
+ mail to doc-team@suse.de. Make sure to include the
+ document title, the product version and the publication date of the
+ documentation. To report errors or suggest enhancements, provide a
+ concise description of the problem and refer to the respective section
+ number and page (or URL).
+
+
+
+
+
+
+
+ Documentation Conventions
+
+
+ yes
+
+
+
+ The following typographical conventions are used in this manual:
+
+
+
+
+
+ /etc/passwd: directory names and file names
+
+
+
+
+ placeholder: replace
+ placeholder with the actual value
+
+
+
+
+ PATH: the environment variable PATH
+
+
+
+
+ ls, : commands, options, and
+ parameters
+
+
+
+
+ user: users or groups
+
+
+
+
+ packagename: name of a package
+
+
+
+
+ ,
+ F1 : a key to press or a key combination;
+ keys are shown in uppercase as on a keyboard
+
+
+
+
+ File, File
+ Save As : menu items, buttons
+
+
+
+
+ This paragraph is only relevant for the x86_64 architecture. The arrows
+ mark the beginning and the end of the text block.
+
+
+ This paragraph is only relevant for the architectures
+ z Systems and POWER. The arrows
+ mark the beginning and the end of the text block.
+
+
+
+
+ Dancing Penguins (Chapter
+ Penguins, ↑Another Manual): This is a
+ reference to a chapter in another manual.
+
+
+
+
+
+
+
+
+
+ Web Interface
+
+
+ Web Interface — Navigation and Overview
+
+
+ This is a chapter about the layout of the Web Interface to SUSE Manager and basic usage
+ information.
+
+
+
+
+
+ Navigation
+
+
+ navigation
+
+
+ WebLogic
+ navigation bar
+
+
+ WebLogic
+ overview
+
+
+ The top navigation bar is divided into a left and a right part. The left part contains a row of
+ tabs. SUSE Manager Administrators see as the top navigation bar.
+ Only SUSE Manager Administrators see the Users and Admin tab.
+
+
+
+ The left navigation bar is divided into pages. The links are context-sensitive. The is an example of the left navigation bar for the
+ Users tab.
+
+
+
+ Some pages have subtabs. These subtabs offer an additional layer of granularity in
+ performing tasks for systems or users. is a menu bar for all
+ System Details subtabs.
+
+
+
+
+
+
+ The right part of the top navigation bar contains
+ various functionalities such as a quick search, links to background information, user preferences, and
+ sign off.
+
+
+
+
+
+ Views Depending on User Roles
+ Because this guide covers the administrator user role level, some tabs, pages, and even whole
+ categories described here may not be visible to you. Text markers are not used to identify which
+ functions are available to each user role level.
+
+
+
+
+
+
+
+
+ Categories and Pages
+
+
+ overview Web interface
+ This section summarizes all of the categories and primary pages (those linked from the top
+ and left navigation bars) within the SUSE Manager Web interface. It does not list the many subpages,
+ tabs and subtabs accessible from the left navigation bar and individual pages. Each area of the
+ Web interface is explained in detail later in this part.
+
+
+
+ Overview — View and manage your primary account information and get help.
+
+
+
+ Overview — Obtain a quick overview of your account. This page notifies you
+ if your systems need attention, provides a quick link directly to these systems, displays the
+ most recent patch alerts for your account, and lists recently registered systems.
+
+
+
+ Your Account — Update your personal profile, addresses, email, and
+ credentials. Deactivate your account.
+
+
+
+ Your Preferences — Indicate if you wish to receive email notifications
+ about available patches for your systems. Set how many items are displayed in system and
+ group lists. Set contents of the overview start page. Select your preferred CSV separator.
+
+
+
+
+ Locale Preferences — Configure timezone.
+
+
+
+
+
+ Your Organization — Update organization configuration and display
+ organization trusts.
+
+
+
+
+
+
+ Systems — Manage all your systems (including virtual guests) here.
+
+
+
+ Overview — View a summary of your systems or system groups showing how
+ many available patches each system has and which systems are entitled.
+
+
+
+ Systems — Select and view subsets of your systems by specific criteria,
+ such as Virtual Systems, Unentitled, Recently Registered, Proxy, and Inactive.
+
+
+
+ System Groups — List your system groups. Create additional groups.
+
+
+
+ System Set Manager — Perform various actions on sets of systems, including
+ scheduling patch updates, package management, listing and creating new groups, managing
+ channel entitlements, deploying configuration files, schedule audits, and check status.
+
+
+
+
+ Advanced Search — Quickly search all your systems by specific criteria,
+ such as name, hardware, devices, system info, networking, packages, and location.
+
+
+
+ Activation Keys — Generate an activation key for a SUSE Manager-entitled
+ system. This activation key can be used to grant a specific level of entitlement or group
+ membership to a newly registered system using the rhnreg_ks command.
+
+
+
+
+ Stored Profiles — View system profiles used to provision systems.
+
+
+
+ Custom System Info — Create and edit system information keys with
+ completely customizable values assigned while provisioning systems.
+
+
+
+ Autoinstallation — Display and modify various aspects of autoinstallation
+ profiles (Kickstart and AutoYaST) used in provisioning systems.
+
+
+
+ Software Crashes — List software crashes grouped by UUID.
+
+
+
+ Virtual Host Managers — Display and modify virtual host managers,
+ file-based or VMware-based.
+
+
+
+
+
+ Salt — View all minions. Manage on-boarding, remote commands, and states
+ catalogs.
+
+
+
+ On-boarding — View and on-board available salt minions by name, check
+ fingerprint signatures and view a minion's current state (Accepted, Rejected and
+ Pending).
+
+
+
+ Remote Commands — Execute remote commands on targeted systems. Select the
+ preview button to ensure selected targets are available and click Run to execute.
+
+
+
+ State Catalog — Create, store, and manage states for your salt-minions
+ from the State Catalog.
+
+
+ Bootstrap Minions — Bootstrap minion machines using SSH. Input SSH
+ credentials and the activation key the selected system will use for its software sources.
+ SUSE Manager will install required software (salt-minion packages on the client machine) and
+ remotely perform the registration.
+
+
+
+
+
+ Patches — View and manage patch (errata) alerts here.
+
+
+
+ Patches — Lists patch alerts and downloads associated RPMs relevant to
+ your systems.
+
+
+
+ Advanced Search — Search patch alerts based on specific criteria, such as
+ synopsis, advisory type, and package name.
+
+
+
+ Manage Patches — Manage the patches for an organization's channels.
+
+
+
+
+ Clone Patches — Clone patches for an organization for ease of replication
+ and distribution across an organization.
+
+
+
+
+
+ Channels — View and manage the available SUSE Manager channels and the files
+ they contain.
+
+
+
+ Software Channels — View a list of all software channels and those
+ applicable to your systems.
+
+
+
+ Package Search — Search packages using all or some portion of the package
+ name, description, or summary, with support for limiting searches to supported platforms.
+
+
+
+
+ Manage Software Channels — Create and edit channels used to deploy
+ configuration files.
+
+
+
+ Distribution Channel Mapping — Define default base channels for servers
+ according to their operating system or architecture when registering.
+
+
+
+
+
+ Audit — View and search CVE audits and OpenSCAP scans.
+
+
+
+ CVE Audit — View a list of systems with their patch status regarding a
+ given CVE (Common Vulnerabilities and Exposures) number.
+
+
+
+ Subscription Matcher — View the results of matching your subscriptions against the systems registered with SUSE Manager.
+
+
+
+ OpenSCAP — View and search OpenSCAP (Security Content Automation Protocol)
+ scans.
+
+
+
+
+
+ Configuration — Keep track of and manage configuration channels, actions,
+ individual configuration files, and systems with SUSE Manager-managed configuration files.
+
+
+
+ Overview — A general dashboard view that shows a configuration summary.
+
+
+
+
+ Configuration Channels — List and create configuration channels from which
+ any subscribed system can receive configuration files.
+
+
+
+ Configuration Files — List and create files from which systems receive
+ configuration input.
+
+
+
+ Systems — List the systems that have SUSE Manager-managed configuration
+ files.
+
+
+
+
+
+ Schedule — Keep track of your scheduled actions.
+
+
+
+ Pending Actions — List scheduled actions that have not been completed.
+
+
+
+
+ Failed Actions — List scheduled actions that have failed.
+
+
+
+ Completed Actions — List scheduled actions that have been completed.
+ Completed actions can be archived at any time.
+
+
+
+ Archived Actions — List completed actions that have been selected to
+ archive.
+
+
+
+ Action Chains — View and edit defined action chains.
+
+
+
+
+
+ Users — View and manage users in your organization.
+
+
+
+ User List — List users in your organization.
+
+
+
+ System Group Configuration — Configure user group creation.
+
+
+
+
+
+ Admin
+ — Use the Setup Wizard to configure
+ SUSE Manager. List, create, and manage one or more SUSE Manager organizations. The SUSE Manager
+ administrator can assign channel entitlements, create and assign administrators for each
+ organization, and other tasks.
+
+
+
+ Setup Wizard — Streamlined configuration of basic tasks.
+
+
+
+ Organizations — List and create new organizations.
+
+
+
+
+ Users — List all users known by SUSE Manager, across all organizations. Click
+ individual user names to change administrative privileges of the user.
+
+ Users created for organization administration can only be configured by the
+ organization administrator, not the SUSE Manager administrator.
+
+
+
+
+ SUSE Manager Configuration — Make General configuration changes to the
+ SUSE Manager server, including Proxy settings, Certificate configuration, Bootstrap Script
+ configuration, Organization changes, and Restart the SUSE Manager server.
+
+
+
+ ISS Configuration — Configure master and slave servers for inter-server
+ synchronization.
+
+
+
+ Task Schedules — View and create schedules.
+
+
+
+ Task Engine Status — View the status of the various tasks of the SUSE Manager
+ task engine.
+
+
+
+
+
+ Show Tomcat Logs — Display the log entries of the Tomcat server, on which
+ the SUSE Manager server is running.
+
+
+
+
+
+ Help — List references to available help resources.
+
+
+
+
+
+ Patch Alert Icons
+
+
+ email address
+ explanation of
+ Throughout SUSE Manager you will see three patch (errata) alert icons.
+
+
+
+
+ Security Alert
+
+
+
+
+
+
+
+ — represents a security alert.
+
+
+
+
+ Bug Fix Alert
+
+
+
+
+
+
+
+ — represents a bug fix alert.
+
+
+
+
+ Enhancement Alert
+
+
+
+
+
+
+
+ — represents an enhancement alert.
+
+
+ On the Overview page, in the Relevant Security
+ Patches section click on the patch advisory to view details about the patch or click
+ on the number of affected systems to see which are affected by the patch alert. Both links take
+ you to tabs of the Patch Details page. If all patches are installed, there is
+ just a Patches > All link to open the Patches tab. Refer to
+ for more information.
+
+
+
+ Quick Search
+
+ In addition to the Advanced Search functionality for Packages, Patches (Errata),
+ Documentation, and Systems offered within some categories, SUSE Manager also offers a Quick Search
+ tool near the top of each page. To use it, select the search item (choose from
+ Systems, Packages, Documentation, and
+ Patches) and type a keyword to look for a name match. Click the
+ Search button. Your results appear at the bottom of the page.
+
+
+
+
+ If you misspell a word during your search query, the SUSE Manager search engine performs
+ approximate string (or fuzzy string) matching, returning results that may be similar in spelling
+ to your misspelled queries.
+ For example, if you want to search for a certain development system called
+ test-1.example.com that is registered with SUSE Manager, but you misspell your
+ query tset, the test-1.example.com system still appears in
+ the search results.
+
+ If you add a distribution or register a system with a SUSE Manager server, it may take
+ several minutes for it to be indexed and appear in search results.
+
+
+
+ For advanced System searches, refer to .
+
+
+
+ For advanced Patch or Errata searches, refer to .
+
+
+ For advanced Package searches, refer to .
+
+
+
+ For advanced Documentation searches, refer to .
+
+
+
+
+
+ Systems Selected
+ On the System Overview page, if you mark the check box next to a system,
+ the system selected number on the right area of the top navigation bar
+ increases. This number keeps track of the systems you have selected for use in the System Set
+ Manager (SSM). At any time, it identifies the number of selected systems and provides the means
+ to work (simultaneously) with an entire selection. Clicking the Clear button
+ deselects all systems, while clicking the Manage button launches the System
+ Set Manager with your selected systems in place.
+ These systems can be selected in a number of ways. Only systems with at least a Management
+ system role are eligible for selection. On all system and system group lists, a Select column
+ exists for this purpose. Select the check boxes next to the systems or groups and click the
+ Update List button below the column. Each time, the Systems Selected tool at
+ the top of the page changes to reflect the new number of systems ready for use in the System Set
+ Manager. Refer to for details.
+
+
+
+ Lists
+
+
+ Lists
+ explanation of
+
+ The information within most categories is presented in the form of lists. These lists have
+ some common features for navigation. For instance, you can navigate through virtually all lists
+ by clicking the back and next arrows above and below the right side of the table. Some lists also
+ offer the option to retrieve items alphabetically by clicking letters above the table.
+
+
+ Performing Large List Operations
+ Performing operations on large lists—such as removing RPM packages from the database
+ with the SUSE Manager Web interface—may take some time and the system may become unresponsive
+ or signal Internal Server Error 500. Nevertheless, the command will succeed in
+ the background if you wait long enough.
+
+
+
+
+
+
+
+ Overview
+
+
+ Overview
+ Web interface
+
+
+ Entering the SUSE Manager URL in a browser takes you to the Sign in screen.
+ If you click on the About tab before logging in, you will find documentation
+ links, including a search function, and the option to request your login credentials if you
+ forgot either password or login. Click on Lookup Login/Password. For more
+ information, see .
+ After logging into the Web interface of SUSE Manager, the first page to appear is
+ Overview. This page contains important information about your systems,
+ including summaries of system status, actions, and patch alerts.
+
+
+ If you are new to the SUSE Manager Web interface, read to
+ familiarize yourself with the layout and symbols used throughout the interface.
+
+
+
+
+ This page is split into functional sections, with the most critical sections displayed
+ first. Users can control which of the following sections are displayed by making selections on
+ the
+ Overview
+ Your Preferences
+ page. Refer to for more information.
+
+
+
+ The Tasks section lists the most common tasks an administrator
+ performs via the Web interface. Click any link to reach the page within SUSE Manager that allows
+ you to accomplish that task.
+
+
+ If any systems have not been checking in to SUSE Manager, they are listed under
+ Inactive System to the right. Highlighting them in this way allows an
+ administrator to quickly select those systems for troubleshooting.
+
+
+ The Most Critical Systems section lists the most critical systems
+ within your organization. It provides a link to quickly view those systems and displays a
+ summary of the patch updates that have yet to be applied to those systems. Click the name of
+ the system to see its System Details page and apply the patch updates.
+
+ Below the list is a link to View All Critical Systems on one page.
+
+
+
+
+ The Recently Scheduled Actions section lists all actions less than
+ thirty days old and their status: failed, completed, or pending. Click the label of any given
+ action to view its details page. Below the list is a link to View All Scheduled
+ Actions on one page, which lists all actions that have not yet been carried out on
+ your client systems.
+
+
+ The Relevant Security Patches section lists all available security
+ patches that have yet to be applied to some or all of your client systems. It is critical that
+ you apply these security patches to keep your systems secure. Below this list find links to all
+ patches (View All Patches) and to the patches that apply to your systems
+ (View All Relevant Patches).
+
+
+ The System Group Names section lists groups you may have created and
+ indicates whether the systems in those groups are fully updated. Click the link below this
+ section to get to the System Groups page, where you can choose
+ System Groups to use with the System Set Manager.
+
+
+ The Recently Registered Systems section lists the systems added to the
+ SUSE Manager in the past 30 days. Click a system's name to see its System
+ Details page. Click the link below this section to View All Recently
+ Registered Systems on one page.
+
+
+
+ To return to this page, click Overview on the left navigation bar.
+
+
+
+
+ Your Account
+
+
+ Oracle
+ Your Account
+
+
+ WebLogic
+ Your Account
+
+
+ On the Your Account page modify your personal information, such as name,
+ password, and title. To modify any of this information, make the changes in the appropriate text
+ fields and click the Update button at the bottom.
+
+ If you change your SUSE Manager password, for security reasons you will not see the new
+ password while you enter it. Replace the asterisks in the Password and
+ Confirm Password text fields with the new password.
+
+ Should you forget your password or username, go to the login screen and click the
+ About tab, then select the Lookup Login/Password page.
+ Here you can either specify your login and email address or only your email address if you are
+ not sure about the username. Then click on Send Password or Send
+ Login respectively.
+
+
+
+ Addresses
+
+ account
+ change
+
+ Oracle
+ Addresses
+ On the Addresses page manage your mailing, billing and shipping
+ addresses, and the associated phone numbers. Click Edit this address below
+ the address to be modified, make the changes, and click Update.
+
+
+
+
+ Change Email
+
+ email address
+ change
+
+ Oracle
+ Email
+ The email address listed in the Your Account page is the address to
+ which SUSE Manager sends email notifications if you select to receive patch alerts or daily
+ summaries for your systems on the Your Preferences page.
+ To change your preferred email address, click Change Email in the left
+ navigation bar. Enter your new email address and click the Update button.
+
+
+ Invalid email addresses like those ending in
+ @localhost are filtered and rejected.
+
+
+
+
+ Credentials
+
+ account
+ credentials
+ View or enter external system or API credentials associated with your SUSE Manager account,
+ for example your SUSE Studio credentials.
+
+
+
+
+ Account Deactivation
+
+ account
+ deactivate
+
+ Oracle
+ Account Deactivation
+ The Account Deactivation page provides a means to cancel your SUSE Manager
+ service. To do so, click the Deactivate Account button. The Web interface
+ returns you to the login screen. If you attempt to log back in, an error message advises you to
+ contact the SUSE Manager administrator for your organization. Note that if you are the only
+ SUSE Manager Administrator for your organization, you are unable to deactivate your account.
+
+
+
+
+
+ Your Preferences
+
+
+ WebUI
+ Your Preferences
+
+
+ The Your Preferences page allows you to configure SUSE Manager options,
+ including:
+
+
+
+
+ Email Notifications — Determine whether you want to receive email every time
+ a patch alert is applicable to one or more systems in your account.
+
+ This setting also enables Management and Provisioning customers to receive a daily
+ summary of system events. These include actions affecting packages, such as scheduled patches,
+ system reboots, or failures to check in. In addition to selecting this check box, you must
+ identify each system to be included in this summary email. By default, all Management and
+ Provisioning systems are included in the summary. Add more systems either individually on the
+ System Details page or for multiple systems at once in the System
+ Set Manager interface. Note that SUSE Manager sends these summaries only to verified
+ email addresses. To disable all messages, simply deselect this check box.
+
+
+
+
+ SUSE Manager List Page Size — Maximum number of items that appear in a list on
+ a single page. If more items are in the list, clicking the Next button
+ displays the next group of items. This preference applies to system lists, patch lists, package
+ lists, and so on.
+
+
+
+ "Overview" Start Page — Select the information sections that are
+ displayed on the Overview Start Page. Check the box to the left of the
+ information section you would like to include.
+
+
+
+ CSV Files — Select the separator character to be used in downloadable CSV
+ files. Comma is the default; as an alternative use
+ Semicolon, which is more compatible with Microsoft Excel.
+
+
+
+ After making changes to any of these options, click the Save Preferences
+ button.
+
+
+
+ Locale Preferences
+
+ package installation
+ language
+
+ package installation
+ locale
+
+ WebLogic
+ language
+
+ WebLogic
+ locale
+
+ On the
+ Overview
+ Locale Preferences
+ page set your SUSE Manager interface to your local time by selecting the appropriate
+ Time Zone from the drop-down box, then click the Save
+ Preferences button to apply the selection.
+
+
+
+
+
+ Your Organization
+ From the Your Organization page you can modify your organization's
+ Configuration and Organization Trusts and manage custom
+ Salt states distributed across an organization.
+
+
+ Configuration
+ On the Configuration page modify your personal information, such as
+ name, password, and title. To modify any of this information, make the changes in the
+ appropriate text fields and click the Update button at the bottom.
+
+
+
+
+ Organization Trusts
+
+ The Organization Trusts page displays the trusts established with your
+ organization (that is, the organization with which you, the logged-in user, are associated). The
+ page also lists Channels Shared, which refers to channels available to your
+ organization via others in the established trusts.
+
+ You can filter the list of trusts by keyword using the Filter by
+ Organization text box and clicking Go.
+
+
+
+
+
+
+
+
+ Custom States
+ The Custom States page displays states which have been created and added
+ using the Salt > State Catalog. From this page you can
+ select which states should be applied across your organization. A state applied from this page
+ will be applied to all minions within an organization.
+
+ Keep in mind states are applied according to the following order of hierarchy within SUSE
+ Manager. Organization > Group > Single
+ System
+
+
+
+ Apply a Custom State at the Organization Level
+
+ Create a state using the Salt > State Catalog or
+ via the command line.
+
+
+ Browse to Overview > Your Organization >
+ Custom States.
+
+
+ Use the search feature to locate a state by name or click the Search
+ button to list all available states.
+
+
+ Select the checkbox for the state to be applied and click the Save
+ button. The Save button saves the change to the database but does not apply the
+ state.
+
+
+ Apply the state by clicking the Apply button. The state will be
+ scheduled and applied to any systems included within the organization.
+
+
+
+
+
+
+
+
+
+
+
+ Systems
+
+
+
+ Web Interface
+ Systems
+
+ If you click the Systems tab on the top navigation bar, the
+ Systems category and links appear. Here you can select systems to perform
+ actions on them and create system profiles.
+
+
+ Overview
+
+
+ SUSE Manager Administrator
+ overview
+
+
+ SUSE Manager Administrator
+ Systems Overview
+
+ Web Interface
+ Systems Overview
+
+
+ The Overview page provides a summary of your systems, including their
+ status, number of associated patches (errata) and packages, and their so-called system type.
+ Clicking on the name of a system takes you to its System Details page. Refer
+ to for more information.
+
+ Clicking View System Groups at the top of the
+ Overview page takes you to a similar summary of your system groups. It
+ identifies group status and displays the number of systems contained. Then, clicking on the
+ number of systems takes you to the Systems tab of the System Group
+ Details page, while clicking on the group name takes you to the
+ Details tab for that system group. Refer to for more information.
+
+ You can also click on Use in SSM in the System Groups
+ section to go directly to the System Set Manager. Refer to for more information.
+
+
+
+ Systems
+
+
+ system list
+
+
+ SUSE Manager Administrator
+ viewing a list of
+
+
+ SUSE Manager Administrator
+ System List
+
+
+ SUSE Manager Administrator
+ status
+
+
+ Web Interface
+ System List
+
+
+ The Systems page displays a list of all your
+ registered systems. Several columns provide information for each
+ system:
+
+
+
+ Select box: Systems without a system type cannot be selected. To select systems, mark the
+ appropriate check boxes. Selected systems are added to the System Set
+ Manager, where actions can be carried out simultaneously on all systems in the set.
+ Refer to for details.
+
+
+
+ System: The name of the system specified during registration. The default
+ name is the hostname of the system. Clicking on the name of a system displays its
+ System Details page. Refer to for
+ more information.
+
+
+
+ Updates: Shows which type of update action is applicable to the system or
+ confirms that the system is up-to-date. Some icons are linked to related tasks. For instance,
+ the standard Updates icon is linked to the Upgrade subtab of the packages
+ list, while the Critical Updates icon links directly to the Software Patches
+ page where you can Apply Patches. The Not Checking In icon is linked to
+ instructions for resolving the issue.
+
+
+
+
+ Check Circle
+
+
+
+
+
+
+
+ — System is up-to-date.
+
+
+
+
+ Exclamation Circle
+
+
+
+
+
+
+
+ — Critical patch (errata) available, update
+ strongly recommended.
+
+
+
+
+ Warning
+
+
+
+
+
+
+
+ — Updates available and recommended.
+
+
+
+ Question
+
+
+
+
+
+
+
+ — System not checking in properly (for 24 hours or more).
+
+
+
+ Lock
+
+
+
+
+
+
+
+ — System is locked; actions prohibited.
+
+
+
+ Rocket
+
+
+
+
+
+
+
+ — System is being deployed using AutoYaST or Kickstart.
+
+
+
+ Clock
+
+
+
+
+
+
+
+ — Updates have been scheduled.
+
+
+
+ Times
+
+
+
+
+
+
+
+ — System not entitled to any update service.
+
+
+
+
+
+ Patches — Total number of patch alerts applicable to the system.
+
+
+
+ Packages: Total number of package updates for the system, including packages
+ related to patch alerts as well as newer versions of packages not related to patch alerts. For
+ example, if a client system that has an earlier version of a package installed gets subscribed
+ to the appropriate base channel of SUSE Manager (such as SUSE Linux Enterprise 12 SP1), that channel may have an
+ updated version of the package. If so, the package appears in the list of available package
+ updates.
+
+ If SUSE Manager identifies package updates for the system, but the package updater (such as
+ Red Hat Update Agent or YaST) responds with a message like "Your system is fully
+ updated", a conflict likely exists in the system's package profile or in the
+ up2date configuration file. To resolve the conflict, either schedule a
+ package list update or remove the packages from the package exceptions list. Refer to for instructions.
+
+
+
+
+
+ Configs: Total number of configuration files applicable to the system.
+
+
+
+
+
+ Crashes:
+
+
+
+
+ Base Channel: The primary channel for the system based on its operating
+ system. Refer to for more information.
+
+
+
+ System Type: Shows whether or not the system is managed and at what service
+ level.
+
+
+
+ Links in the left navigation bar below Systems enable you to select and
+ view predefined sets of your systems.
+
+
+
+
+
+ All
+ The All page contains the default set of your systems. It displays
+ every system you have permission to manage. You have permission if you are the only user in your
+ organization, if you are a SUSE Manager Administrator, or if the system belongs to a group for
+ which you have admin rights.
+
+
+
+
+ Physical Systems
+ To reach this page, select the Systems tab, followed by the
+ Systems subtab from the left navigation bar, and finally select
+ Physical Systems from the left navigation bar. This page lists each physical
+ system of which SUSE Manager is aware.
+
+
+
+
+
+ Virtual Systems
+ To reach this page, select the Systems tab, followed by the
+ Systems subtab from the left navigation bar, and finally select
+ Virtual Systems from the left navigation bar. This page lists each virtual
+ host of which SUSE Manager is aware and the guest systems on those hosts.
+
+
+ System
+
+
+ This column displays the name of each guest system.
+
+
+
+ Updates
+
+
+ This column shows whether there are patches (errata updates) available for the guest
+ systems that have not yet been applied.
+
+
+
+ Status
+
+
+ This column indicates whether a guest is running, paused, or stopped.
+
+
+
+ Base Channel
+
+
+ This column displays the base channel to which the guest is currently subscribed.
+
+
+
+
+ Only guests registered with SUSE Manager are displayed with blue text. Clicking on the
+ hostname of such a guest system displays its System Details page.
+
+
+
+
+ Bare Metal Systems
+ Here, all unprovisioned (bare-metal) systems with hardware details are listed. For more
+ information on bare-metal systems, see .
+
+
+
+
+ Out of Date
+ The Out of Date page displays all systems where applicable patch alerts
+ have not been applied.
+
+
+
+
+ Requiring Reboot
+ Systems listed here need rebooting. Click on the name for details, where you can also
+ schedule a reboot.
+
+
+
+
+ Non-compliant Systems
+ Non-compliant systems have packages installed which are not available from SUSE Manager.
+ Packages shows how many installed packages are not available in the channels
+ assigned to the system. A non-compliant system cannot be reinstalled.
+
+
+
+
+ Without System Type
+ The Without System Type page displays systems that have ...
+
+
+
+
+
+
+ Ungrouped
+ The Ungrouped page displays systems not yet assigned to a specific
+ system group.
+
+
+
+
+ Inactive
+ The Inactive page displays systems that have not checked in with
+ SUSE Manager for 24 hours or more. Checking in means that YaST Online Update on SUSE Linux Enterprise or Red Hat
+ Update Agent on Red Hat Enterprise Linux client systems connects to SUSE Manager to see if there are any updates
+ available or if any actions have been scheduled. If you see a message telling you that check-ins
+ are not taking place, the client system is not successfully connecting to SUSE Manager. The reason
+ may be one of the following:
+
+
+ The system is not entitled to any SUSE Manager service. System profiles that remain
+ unentitled for 180 days (6 months) are removed.
+
+
+ The system is entitled, but the SUSE Manager daemon (rhnsd) has
+ been disabled on the system.
+
+
+
+
+ The system is behind a firewall that does not allow connections over
+ https (port 443).
+
+
+ The system is behind an HTTP proxy server that has not been properly configured.
+
+
+ The system is connected to a SUSE Manager Proxy Server or SUSE Manager that has not been
+ properly configured.
+
+
+ The system itself has not been properly configured, perhaps pointing at the wrong
+ SUSE Manager Server.
+
+
+ The system is not in the network.
+
+
+ Some other barrier exists between the system and the SUSE Manager Server.
+
+
+
+
+
+
+ Recently Registered
+ The Recently Registered page displays any systems that have been
+ registered in a given period. Use the drop-down menu to specify the period in days, weeks, 30-
+ and 180-day increments, and years.
+
+
+
+
+ Proxy
+ The Proxy page displays the SUSE Manager Proxy Server systems registered
+ with your SUSE Manager server.
+
+
+
+
+ Duplicate Systems
+ The Duplicate Systems page lists current systems and any active and
+ inactive entitlements associated with them. Active entitlements are in gray, while inactive
+ entitlements are highlighted in yellow and their check boxes checked by default for you to
+ delete them as needed by clicking the Delete Selected button. Entitlements
+ are inactive if the system has not checked in with SUSE Manager in a time specified via the
+ drop-down list A system profile is inactive if its system has not checked in
+ for:.
+ You can filter duplicate entitlements by IP Address,
+ Hostname, or MAC address by clicking on the respective
+ subtab. You may filter further by inactive time or typing the system's hostname, IP address, or
+ MAC address in the corresponding Filter by: text box.
+ To compare up to three duplicate entitlements at one time, click the Compare
+ Systems link in the Last Checked In column. Inactive components of
+ the systems are highlighted in yellow. You can then determine which systems are inactive or
+ duplicate and delete them by clicking the Delete System Profile button. Click
+ the Confirm Deletion button to confirm your choice.
+
+
+
+
+ System Currency
+ The System Currency Report displays an overview of severity scores of patches relevant to
+ the system. The weighting is defined via the System Details page. The default
+ weight awards critical security patches with the heaviest weight and enhancements with the
+ lowest. The report can be used to prioritize maintenance actions on the systems registered to
+ SUSE Manager.
+
+
+
+ System Types
+ System Types define the set of functionalities available for each system in SUSE Manager such
+ as the ability of installing software or creating guest virtual machines.
+ A list of profiled systems follows, with their base and add-on system types shown in the
+ appropriate columns. To change system types, select the systems you wish to modify, and choose
+ either Add System Type or Remove System Type.
+
+
+
+
+
+
+ System Details
+ Once systems are registered to SUSE Manager, they are displayed on the
+ Systems
+ Overview
+ page. Here and on any other page, clicking the name takes you to the
+ System Details page of the client, where all kinds of administrative tasks
+ can be performed, including the removal of a system.
+
+ The Delete System link in the upper right of this screen refers to the
+ system profile only. Deleting a host system profile will not destroy or remove the registration
+ of guest systems. Deleting a guest system profile does not remove it from the list of guests for
+ its host, nor does it stop or pause the guest. It does, however, remove your ability to manage
+ it via SUSE Manager.
+
+ If you mistakenly deleted a system profile from SUSE Manager, you may
+ re-register the system using the bootstrap script
+ or rhnreg_ks manually.
+
+ The Details page has numerous subtabs that provide specific system information as well as
+ other identifiers unique to the system. The following sections discuss these tabs and their
+ subtabs in detail.
+
+
+ System Details > Details
+
+
+ deleting a system
+
+ SUSE Manager Administrator
+ Details
+ This page is not accessible from any of the standard navigation bars. However, clicking on
+ the name of a system anywhere in the Web interface displays this page. By default the
+ Details
+ Overview
+ subtab is displayed. Other tabs are available, depending on the system type and
+ add-on system type.
+
+
+ System Details > Details
+ > Overview
+ This system summary page displays the system status message and the following key
+ information about the system:
+
+ System Status
+
+
+
+
+
+ This message indicates the current state of your system in relation to SUSE Manager.
+
+ If updates are available for any entitled system, the message Software
+ Updates Available appears, displaying the number of critical and non-critical
+ updates as well as the sum of affected packages. To apply these updates, click on
+ Packages and select some or all packages to update, then click
+ Upgrade Packages.
+
+
+
+
+
+ System Info
+
+ Hostname
+
+ The hostname as defined by the client system.
+
+
+
+
+
+ IP Address
+
+ The IP address of the client.
+
+
+
+ IPv6 Address
+
+ The IPv6 address of the client.
+
+
+
+ Virtualization
+
+ If the client is a virtual machine, the type of virtualization is listed.
+
+
+
+
+ Kernel
+
+ The kernel installed and operating on the client system.
+
+
+
+ SUSE Manager System ID
+
+ A unique identifier generated each time a system registers with SUSE Manager.
+
+ The system ID can be used to eliminate duplicate profiles from SUSE Manager. Compare the
+ system ID listed on this page with the information stored on the client system in the
+ /etc/sysconfig/rhn/systemid file. In that file, the system's current
+ ID is listed under system_id. The value starts after the characters
+ ID-. If the value stored in the file does not match the value listed in
+ the profile, the profile is not the most recent one and may be removed.
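+ A minimal way to display the locally stored ID (assuming the standard
+ file location) is:
+ $ grep -o 'ID-[0-9]*' /etc/sysconfig/rhn/systemid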
+
+
+
+
+ Activation Key
+
+ Displays the activation key used to register the system.
+
+
+
+ Installed Products
+
+ Lists the products installed on the system.
+
+
+
+ Lock Status
+
+ Indicates whether a system has been locked.
+ Actions cannot be scheduled for locked systems on the Web interface until the lock is
+ removed manually. This does not include preventing automated patch updates scheduled via the
+ Web interface. To prevent the application of automated patch updates, deselect Auto
+ Patch Update from the
+ System Details
+ Details
+ Properties
+ subtab. For more information, refer to .
+ Locking a system can prevent you from accidentally changing a system. For example, the
+ system may be a production system that should not receive updates or new packages until you
+ decide to unlock it.
+
+ Locking a system in the Web interface will not prevent any
+ actions that originate from the client system. For example, if a user logs into the client
+ directly and runs YaST Online Update (on SLE) or pup (on RHEL),
+ the update tool will install available patches whether or not the system is locked in the
+ Web interface.
+ Locking a system does not restrict the number of users who can
+ access the system via the Web interface. If you wish to restrict access to the system,
+ associate that system with a System Group and assign a System Group Administrator to it.
+ Refer to for more information about System
+ Groups.
+
+ It is also possible to lock multiple systems via the System Set Manager. Refer to
+ for instructions.
+
+
+
+
+ Subscribed Channels
+
+
+
+
+
+ List of subscribed channels. Clicking on a channel name takes you to the
+ Basic Channel Details page. To change subscriptions, click the
+ (Alter Channel Subscriptions) link right beside the title to assign
+ available base and child channels to this system. When finished making selections, click the
+ Change Subscriptions button to change subscriptions and the base
+ software channel. For more information, refer to .
+
+
+
+ Base Channel
+
+ The first line indicates the base channel to which this system is subscribed. The base
+ channel should match the operating system of the client.
+
+
+
+ Child Channels
+
+ The subsequent lines of text, which depend on the base channel, list child channels.
+ An example is the SUSE Manager Tools channel.
+
+
+
+
+ System Events
+
+ Checked In
+
+ The date and time at which the system last checked in with SUSE Manager.
+
+
+
+ Registered
+
+ The date and time at which the system registered with SUSE Manager and created this
+ profile.
+
+
+
+ Last Booted
+
+ The date and time at which the system was last started or restarted.
+
+ Systems with a Management entitlement can be rebooted from this screen.
+
+
+ Select Schedule system reboot.
+
+
+ Provide the earliest date and time at which the reboot may take place.
+
+
+ Click the Schedule Reboot button in the lower right.
+
+
+ When the client checks in after the scheduled start time, SUSE Manager will instruct the
+ system to restart itself.
+
+
+
+
+
+ The so-called OSA status
+ is also displayed for client systems registered with SUSE Manager that have the OSA dispatcher
+ (osad) configured.
+
+
+
+ Push enables SUSE Manager customers to immediately initiate tasks
+ rather than wait for those systems to
+ check in with SUSE Manager. Scheduling actions through push is identical to the process of
+ scheduling any other action, except that the task can immediately be carried out instead of
+ waiting the set interval for the system to check in.
+ In addition to the configuration of SUSE Manager, in order to receive pushed actions each
+ client system must have the osad package installed and its service
+ started.
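+ On a systemd-based SLE client this could be done as follows (a sketch;
+ it assumes the osad package is available from the channels assigned to
+ the system):
+ $ zypper in osad
+ $ systemctl enable osad
+ $ systemctl start osad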
+
+ System Properties
+
+ System Types
+
+ Lists system types and add-on types currently applied to the system.
+
+
+
+ Notifications
+
+ Indicates the notification options for this system. You can choose whether you wish to
+ receive email notifying you of available updates for this system. In addition, you may
+ choose to include systems in the daily summary email.
+
+
+
+ Contact Method
+
+ Available methods: Pull, Push via SSH, and Push via SSH tunnel.
+
+
+
+ Auto Patch Update
+
+ Indicates whether this system is configured to accept updates automatically.
+
+
+
+ System Name
+
+ By default, the hostname of the client is displayed, but a different system name can
+ be assigned.
+
+
+
+ Description
+
+ This information is automatically generated at registration. You can edit the
+ description to include any information you wish.
+
+
+
+ Location
+
+ This field displays the physical address of the system if specified.
+
+
+
+ Clicking the Edit These Properties link right beside the
+ System Properties title opens the
+ System Details
+ Properties
+ subtab. On this page, edit any text you choose, then click the Update
+ Properties button to confirm.
+
+
+ System Details > Details
+ > Properties
+
+ SUSE Manager Administrator
+ Updating Properties
+ This subtab allows you to alter the following basic properties of your system:
+
+ System Details
+
+ System Name
+
+ By default, this is the hostname of the system. You can, however, alter the profile name
+ to anything that allows you to distinguish this system from others.
+
+
+
+ Base System Type
+
+ For information only.
+
+
+
+ Add-on System Types
+
+ Select one of the available system types such as Virtualization.
+
+
+
+
+ Notifications
+
+ Select whether notifications about this system should be sent and whether to include
+ this system in the daily summary.
+
+ This setting keeps you aware of all advisories pertaining to the system. Anytime an update
+ is released for the system, you receive an email notification.
+ The daily summary reports system events that affect packages, such as scheduled patch
+ updates, system reboots, or failures to check in. In addition to including the system here,
+ you must choose to receive email notification in the Your Preferences
+ page of the Overview category.
+
+
+
+ Contact Method
+
+ Select between Pull, Push via SSH, and
+ Push via SSH tunnel.
+
+
+
+ Auto Patch Update
+
+ If this box is checked, available patches are automatically applied to the system when
+ it checks in (Pull) or immediately if you select either Push option. This action takes place
+ without user intervention. The SUSE Manager Daemon (rhnsd) must be
+ enabled on the system for this feature to work.
+
+ Conflicts With Third Party Packages
+ Enabling auto-update might lead to failures because of conflicts between system
+ updates and third party packages. To avoid failures caused by those issues, it is better to
+ leave this box unchecked.
+
+
+
+
+ Description
+
+ By default, this text box records the operating system, release, and architecture of
+ the system when it first registers. Edit this information to include anything you like.
+
+
+
+
+ The remaining fields record the physical address at which the system is stored. To
+ confirm any changes to these fields, click the Update Properties button.
+
+ Setting Properties for Multiple Systems
+ Many of these properties can be set for multiple systems in one go via the System Set
+ Manager interface. For details, see .
+
+
+
+ System Details > Details
+ > Remote Command
+
+ This subtab allows you to run a remote command on the system.
+ Before doing so, you
+ must first configure the system to accept such commands.
+
+
+ On SLE clients, subscribe the system to the SUSE Manager Tools child channel and use
+ zypper to install the rhncfg,
+ rhncfg-client, and rhncfg-actions packages, if not
+ already installed:
+ zypper in rhncfg rhncfg-client rhncfg-actions
+ On RHEL clients, subscribe the system to the Tools child channel and use
+ up2date or yum to install the
+ rhncfg, rhncfg-client, and
+ rhncfg-actions packages, if not already installed:
+ yum install rhncfg rhncfg-client rhncfg-actions
+
+
+ Log into the system as root and add the following file to the local SUSE Manager
+ configuration directory: allowed-actions/script/run.
+
+
+ Create the necessary directory on the target system:
+ mkdir -p /etc/sysconfig/rhn/allowed-actions/script
+
+
+ Create an empty run file in that directory to act as a flag to
+ SUSE Manager, signaling permission to allow remote commands:
+ touch /etc/sysconfig/rhn/allowed-actions/script/run
+
+
+
+
+ Once the setup is complete, refresh the page in order to view the text fields for remote
+ commands. Identify a specific user, group, and timeout period, as well as the script to run.
+ Select a date and time to execute the command, then click Schedule or add
+ the remote command to an action chain. For further information on action chains, refer to .
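+ Purely as an illustration, a remote command script entered in the Script
+ field might look like the following hypothetical sketch, which refreshes the repositories and
+ reports basic system state; the actual script content depends entirely on your environment:
+ #!/bin/sh
+ # Hypothetical example script for a SLE client
+ zypper --non-interactive refresh
+ uptime
+ df -h /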
+
+
+ System Details > Details
+ > Reactivation
+
+
+ reactivating
+ systems
+
+ SUSE Manager Administrator
+ Reactivation
+ Reactivation
+ keys include
+ this system's ID, history, groups, and channels. This key can then be used only once with the
+ rhnreg_ks command line utility to re-register this system and regain all
+ SUSE Manager settings. Unlike typical activation keys, which are not associated with a specific
+ system ID, keys created here do not show up within the Activation Keys page.
+ Reactivation keys can be combined with activation keys to aggregate the settings of
+ multiple keys for a single system profile. For example:
+ rhnreg_ks --server=server-url \
+ --activationkey=reactivation-key,activationkey --force
+
+ When autoinstalling a system with its existing SUSE Manager profile, the profile uses the
+ system-specific activation key created here to re-register the system and return its other
+ SUSE Manager settings. For this reason, you should not regenerate, delete, or use this key (with
+ rhnreg_ks) while a profile-based autoinstallation is in progress. If you
+ do, the autoinstallation will fail.
+
+
+
+ System Details > Details
+ > Hardware
+
+ hardware profile
+ Updating on server
+
+ SUSE Manager Administrator
+ Updating hardware profile
+ This subtab provides detailed information about the system, including networking, BIOS,
+ memory, and other devices, but only if you included the hardware profile for this machine during
+ registration. If the hardware profile looks incomplete or outdated, click the Schedule
+ Hardware Refresh button. The next time the SUSE Manager Daemon
+ (rhnsd) connects to SUSE Manager, it will update your system profile
+ with the latest hardware information.
+
+
+ System Details > Details
+ > Migrate
+ This subtab provides the option to migrate systems between organizations. Select an
+ Organization Name and click Migrate System to initiate
+ the migration.
+
+
+ Defined system details such as channel assignments, system group membership, custom data
+ values, configuration channels, reactivation keys, and snapshots will be dropped from the
+ system configuration after the migration.
+
+
+
+
+ System Details > Details
+ > Notes
+
+ navigation
+ about systems
+
+ SUSE Manager Administrator
+ Notes
+ This subtab provides a place to create notes about the system. To add a new note, click
+ the Create Note link, type a subject and write your note, then click the
+ Create button. To modify a note, click on its subject in the list of notes,
+ make your changes, and click the Update button. To remove a note, click on
+ its subject in the list of notes, then click the delete note link.
+
+
+ System Details > Details
+ > Custom Info
+
+
+ changing email address
+ about systems
+
+ SUSE Manager Administrator
+ Custom Information
+ This subtab provides
+ completely customizable information about the system. Unlike Notes,
+ Custom Info is structured, formalized, and can be searched. Before you can
+ provide custom information about a system, you must have Custom Information
+ Keys. Click on Custom System Info in the left navigation bar.
+ Refer to for instructions.
+ Once you have created one or more keys, you may assign values for this system by
+ selecting the Create Value link. Click the name of the key in the resulting
+ list and enter a value for it in the Description field, then click the
+ Update Key button.
+
+
+ System Details > Details
+ > Proxy
+
+ This tab is only available for SUSE Manager Proxy systems and lists all clients registered
+ with the selected SUSE Manager Proxy server.
+
+
+
+
+ System Details > Software
+
+ This tab and its subtabs allow you to manage the software on the system: patches (errata),
+ packages and package profiles, software channel memberships, and service pack (SP) migrations.
+
+ System Details > Software
+ > Patches
+ This subtab contains a list of patch (errata) alerts applicable to the system. Refer to
+ for meanings of the icons on this tab. To apply
+ updates, select them and click the Apply Patches button. Double-check the
+ updates to be applied on the confirmation page, then click the Confirm
+ button. The action is added to the Pending Actions list under
+ Schedule. Patches that have been scheduled cannot be selected for update.
+ Instead of a check box there is a clock icon. Click on the clock to see the Action
+ Details page.
+ A Status column in the patches table shows whether an update has been
+ scheduled. Possible values are: None, Pending, Picked Up, Completed, and Failed. This column
+ displays only the latest action related to a patch. For instance, if an action fails and you
+ reschedule it, this column shows the status of the patch as Pending with no
+ mention of the previous failure. Clicking a status other than None takes you
+ to the Action Details page. This column corresponds to the one on the
+ Affected Systems tab of the Patch Details page.
+
+
+ System Details > Software
+ > Packages
+
+ package installation
+ Updating on server
+
+ SUSE Manager Administrator
+ Updating package list
+ Manage the software packages on the system. Most of the following actions can also be
+ performed via action chains. For further information on action chains, refer to .
+
+ When new packages or updates are installed on the client via SUSE Manager, any licenses
+ (EULAs) requiring agreement before installation are automatically accepted.
+
+
+
+ Packages
+
+ The default display of the Packages tab describes the options
+ available and provides the means to update your package list. To update or complete a
+ potentially outdated list, possibly due to the manual installation of packages, click the
+ Update Package List button in the bottom right-hand corner of this page.
+ The next time the SUSE Manager daemon (rhnsd) connects to SUSE Manager,
+ it updates your system profile with the latest list of installed packages.
+
+
+
+ List / Remove
+
+ Lists installed packages and enables you to remove them. View and sort packages by
+ name or the date they were installed on the system. Search for
+ the desired package by typing its name in the Filter by Package Name
+ text box, or by clicking the letter or number corresponding to the first character of the
+ package name. Click on a package name to view its Package Details page.
+ To delete packages from the system, select their check boxes and click the Remove
+ Packages button on the bottom right-hand corner of the page. A confirmation page
+ appears with the packages listed. Click the Confirm button to remove the
+ packages.
+
+
+
+ Upgrade
+
+ Displays a list of packages with newer versions available in the subscribed channels.
+ Click on the latest package name to view its Package Details page. To
+ upgrade packages immediately, select them and click the Upgrade Packages
+ button. Any EULAs will be accepted automatically.
+
+
+
+ Install
+
+ Install new packages on the system from the available channels. Click on the package
+ name to view its Package Details page. To install packages, select them
+ and click the Install Selected Packages button. EULAs are automatically
+ accepted.
+
+
+
+ Verify
+
+ Validates the packages installed on the system against its RPM database. This is the
+ equivalent of running rpm -V. The metadata of the system's packages is
+ compared with information from the database, such as file checksum, file size, permissions,
+ owner, group, and type. To verify a package or packages, select them, click the
+ Verify Selected Packages button, and confirm. When the check is
+ finished, select this action in the History subtab under
+ Events to see the results.
+
+
+
+ Lock
+
+ Locking a package prevents modifications like removal or update of the package. Since
+ locking and unlocking happens via scheduling requests, locking might take effect with some
+ delay. If an update happens before then, the lock will have no effect. Select the packages
+ you want to lock. If locking should happen later, select the date and time above the
+ Request Lock button, then click on it. A small lock icon marks locked
+ packages. To unlock, select the package and click Request Unlock,
+ optionally specifying the date and time for unlocking to take effect.
+
+ This feature only works if zypper is used as the package manager. On the target
+ machine the zypp-plugin-spacewalk package must be installed (version
+ 0.9.x or higher). A sketch of the underlying zypper locks follows this list.
+
+
+
+
+ Profiles
+
+ Compare installed packages with the package lists in stored profiles and other
+ systems. Select a stored profile from the drop-down menu and click the
+ Compare button. To compare with packages installed on a different
+ system, select the system from the associated drop-down menu and click the
+ Compare button. To create a stored profile based on the existing system,
+ click the Create System Profile button, enter any additional information
+ you desire, and click the Create Profile button. These profiles are kept
+ within the Stored Profiles page linked from
+ the left navigation bar.
+
+
+ Once installed packages have been compared with a profile,
+ customers have the option to synchronize the selected system
+ with the profile. All changes apply to the system, not the
+ profile. Packages might get deleted and additional packages
+ installed on the system. To install only specific packages,
+ click the respective check boxes in the profile. To remove
+ specific packages installed on the system, select the check
+ boxes of these packages showing a difference of This
+ System Only. To completely synchronize the system's
+ packages with the compared profile, select the master check box
+ at the top of the column. Then click the Sync Packages
+ to button. On the confirmation screen, review the
+ changes, select a time frame for the action, and click the
+ Schedule Sync button.
+
+
+ You can use a stored profile as a template for the files to be
+ installed on an autoinstalled system; see .
+
+
+
+
+
+ Non Compliant
+
+ Lists packages that are installed on this system and are not present in any of its
+ channels.
+
+
+
+
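+ As mentioned under Lock above, package locking relies on zypper. Purely as an
+ illustration of what such a lock looks like on the client, and assuming a package named
+ vim, the equivalent manual commands would be a sketch like the following (this is an
+ assumption about the underlying mechanism, not a required step):
+ zypper addlock vim       # add a package lock
+ zypper locks             # list current locks
+ zypper removelock vim    # remove the lock again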
+
+ System Details > Software
+ > Software Channels
+ Software channels provide a well-defined method to determine which packages should be
+ available to a system for installation or upgrade based on its operating systems, installed
+ packages, and functionality. Click a channel name to view its Channel
+ Details page. To modify the child channels associated with this system, use the
+ check boxes next to the channels and click the Change Subscriptions button.
+ You will receive a success message or be notified of any errors. To change the system's base
+ channel, select the new one from the drop-down menu and confirm. Refer to for more information.
+
+
+ System Details > Software
+ > SP Migration
+
+ Service Pack Migration (SP Migration) allows you to upgrade a system from one service
+ pack to another.
+
+ During migration SUSE Manager automatically accepts any required licenses (EULAs) before
+ installation.
+
+ Beginning with SLE 12, SUSE supports service pack skipping; for example, it is now possible to
+ migrate directly from SLE 12 to SLE 12 SP2. Note that SLE 11 may only be migrated step by step and
+ individual service packs should not be skipped. Supported migrations include any of the
+ following:
+
+
+ SLE 11 > SLE 11 SP1 > SLE 11 SP2 > SLE 11 SP3 > SLE 11 SP4
+
+
+ SLE 12 > SLE 12 SP1 > SLE 12 SP2
+
+
+
+ Migrating from an Earlier Version of SLES
+
+
+ It is not possible to migrate, for example, from SLE 11 to SLE 12 using this tool. You must use
+ AutoYaST to perform a migration on this level.
+
+
+
+ Rollback Not Possible
+ The migration feature does not cover any rollback functionality. Once the migration
+ procedure is started, rolling back is not possible. Therefore it is recommended to have a
+ working system backup available for an emergency.
+
+
+ Performing a Migration
+
+ From the Systems Overview page, select a traditionally managed
+ client. Currently migration of Salt systems is not supported.
+
+
+ Select the Software tab then the SP Migration
+ tab.
+
+
+ Select your target migration path and click Target Migration.
+
+
+ On the Service Pack Migration - Channels view select the correct base
+ channel, including Mandatory Child Channels and any additional
+ Optional Child Channels. Select Schedule Migration
+ once your channels have been configured properly.
+
+
+
+
+ System Details > Software
+ > Software Crashes
+
+ Red Hat clients can be configured to report software failures to SUSE Manager via the
+ Automatic Bug Reporting Tool (ABRT) to extend the overall reporting functionality of your
+ systems. This functionality is not supported on SUSE Linux Enterprise systems. If configured appropriately, Red
+ Hat clients automatically report software failures captured by ABRT and process the captured
+ failures in a centralized fashion on SUSE Manager. You can use either the Web interface or the API
+ to process these failure reports.
+
+
+
+
+
+
+
+
+
+ System Details > Configuration
+
+
+ This tab and its subtabs assist in managing the configuration files associated with the
+ system. These configuration files may be managed solely for the current system or distributed
+ widely via a Configuration Channel. The following sections describe these and other available
+ options on the
+ System Details
+ Configuration
+ subtabs.
+
+ To manage the configuration of a system, it must have the latest
+ rhncfg* packages installed. Refer to for instructions on enabling and disabling scheduled
+ actions for a system.
+
+ This section is available to normal users with access to systems that have configuration
+ management enabled. Like software channels, configuration channels store files to be installed
+ on systems. While software updates are provided by SCC, configuration files are managed solely
+ by you. Also, unlike software packages, various versions of configuration files may prove
+ useful to a system at any given time. Only the latest version can be deployed.
+
+ System Details > Configuration
+ > Overview
+ This subtab provides access to the configuration files of your system and to the most
+ common tasks used to manage configuration files. In the Configuration
+ Overview, click on the Add links to add files, directories, or
+ symlinks. Here you also find shortcuts to perform any of the common configuration management
+ tasks listed on the right of the screen by clicking one of the links under
+ Configuration Actions.
+ emap 2014-04-22: much has changed here, but I've got no system with configuration files
+ available, so I'm going by the descriptions in the web-ui.
+ ke 2016-11-09: checked. updated where appropriate, but I did not mention every detail.
+ Check bug 1009102 and 1009118, once resolved; maybe, button texts will
+ change!
+
+
+ System Details > Configuration
+ > View/Modify Files
+ This subtab lists all configuration files currently associated with the system. These are
+ sorted via subtabs into centrally managed files, locally managed files, and a local sandbox
+ for files under development.
+ Using the appropriate buttons on a subtab, you can copy files from one subtab to another.
+
+
+ Centrally-Managed Files
+
+ Centrally-managed configuration files are provided by global configuration channels.
+ Determine which channel provides which file by examining the Provided By
+ column below. Some of these centrally-managed files may be overridden by locally-managed
+ files. Check the Overridden By column to find out if any files are
+ overridden, or click Override this file to provide such an overriding
+ file.
+
+
+
+ Locally-Managed Files
+
+ Locally-managed configuration files are useful for overriding centrally-managed
+ configuration profiles that cause problems on particular systems. Also, locally-managed
+ configuration files are a method by which system group administrators who do not have
+ configuration administration privileges can manage configuration files on the machines they
+ are able to manage.
+
+
+
+ Local Sandbox
+
+ In the sandbox you can store configuration files under development. You can promote
+ files from the sandbox to a centrally-managed configuration channel using Copy
+ Latest to Central Channel. After files in this sandbox have been promoted to a
+ centrally-managed configuration channel, you will be able to deploy them to other systems.
+ Use Copy Latest to System Channel if you want to install a
+ configuration on the local system only. Once done, the file will end up on the
+ Locally-Managed Files subtab.
+
+
+
+
+
+ System Details > Configuration
+ > Add Files
+ To upload, import or create new configuration files, click on Add
+ Files.
+
+
+ Upload File
+
+ To upload a configuration file from your local machine, browse for the upload file,
+ specify whether it is a text or binary file, enter Filename/Path as well
+ as user and group ownership. Specific file permissions can be set. When done, click
+ Upload Configuration File.
+
+
+
+ Import Files
+
+ Via the Import Files tab, you can add files from the previously
+ selected system to the sandbox of this system. Files will be imported the
+ next time rhn_check runs on the system. To deploy these files or override
+ configuration files in global channels, copy this file into your local override channel
+ after the import has occurred.
+ In the text field under Import New Files enter the full path of any
+ files you want to import into SUSE Manager, or select deployable configuration files from the
+ Import Existing Files list. When done, click Import
+ Configuration Files.
+
+
+
+ Create File
+
+ Under Create File, you can directly create the configuration file
+ from scratch. Select the file type, specify the path and file name under which the file will be stored,
+ and, for symbolic links, the target file name and path. Ownership and permissions as well as macro
+ delimiters need to be set. For more information on using macros, see . In the File Contents text field, type the
+ configuration file. Select the type of file you are creating from the drop-down menu.
+ Possible choices are Shell, Perl, Python, Ruby and XML. When done, click Create
+ Configuration File.
+
+
+
+
+
+ System Details > Configuration
+ > Deploy Files
+ Under Deploy Files you find all files that can be deployed on the
+ selected system. Files from configuration channels with a higher priority take precedence over
+ files from configuration channels with a lower priority. emap
+ 2014-04-22: can't see if the rest is still accurate or has changed.
+
+
+
+
+
+
+ System Details > Configuration
+ > Compare Files
+
+
+ emap 2014-04-22: Still accurate? This subtab compares a
+ configuration file stored on the SUSE Manager with the file stored on the client. (It does not
+ compare versions of the same file stored in different channels.) Select the files to be
+ compared, click the Compare Files button, select a time to perform the diff,
+ and click the Schedule Compare button to
+ confirm.
+
+
+ To watch progress, see .
+
+ After the diff has been performed, go to Recent
+ Events in
+ to see the results.
+
+
+
+
+ System Details > Configuration
+ > Manage Configuration Channels
+ This subtab allows you to subscribe to and rank configuration channels associated with
+ the system, lowest first.
+ The List/Unsubscribe from Channels subtab contains a list of the
+ system's configuration channel subscriptions. Click the check box next to the Channel and click
+ Unsubscribe to remove the subscription to the channel.
+ The Subscribe to Channels subtab lists all available configuration
+ channels. To subscribe to a channel, select the check box next to it and press
+ Continue. To subscribe to all configuration channels, click Select
+ All and press Continue. The View/Modify
+ Rankings page automatically loads.
+ The View/Modify Rankings subtab allows users to set the priority with
+ which files from a particular configuration channel are ranked. The higher the channel is on
+ the list, the more its files take precedence over files on lower-ranked channels. For example,
+ the higher-ranked channel may have an httpd.conf file that will take
+ precedence over the same file in a lower-ranked channel.
+
+
+
+
+
+
+ System Details > Provisioning
+
+
+ 2011-01-11 - ke: Will this also work for AutoYaST? rollback is kickstart
+ only?
+ 2011-02-09 - ke: Probably applies, but ug will check it more closely next
+ time.
+ The Provisioning tab and its subtabs allow you to schedule and monitor
+ AutoYaST or Kickstart installations and to restore a system to its previous state. AutoYaST is a
+ SUSE Linux and Kickstart is a Red Hat utility—both allow you to automate the
+ reinstallation of a system. Snapshot rollbacks provide the ability to revert
+ certain changes on the system. You can roll back a set of RPM packages, but rolling back across
+ multiple update levels is not supported. Both features are described in the sections that
+ follow.
+
+
+ System Details > Provisioning
+ > Autoinstallation
+
+
+
+
+ The Schedule subtab allows you to configure and schedule an autoinstallation
+ for this system.
+
+ For background information about autoinstallation, see .
+
+ In the Schedule subtab, schedule the selected system for
+ autoinstallation. Choose from the list of available profiles.
+
+ You must first create a profile before it appears on this subtab. If you have not
+ created any profiles, refer to
+ before scheduling an autoinstallation for a system.
+
+ To alter autoinstallation settings, click on the Advanced
+ Configuration button. Configure the network connection and post-installation
+ networking information. You can aggregate multiple network interfaces into a single logical
+ "bonded" interface. In Kernel Options specify kernel options to be used
+ during autoinstallation. Post Kernel Options are used after the installation
+ is complete and the system is booting for the first time. Configure package profile
+ synchronization.
+ Select a time for the autoinstallation to begin and click Schedule Autoinstall
+ and Finish for all changes to take effect and to schedule the autoinstallation.
+ Alternatively, click Create PXE Installation Configuration to create a
+ Cobbler system record. The selected autoinstallation profile will be used to automatically
+ install the configured distribution next time that particular system boots from PXE. In this
+ case SUSE Manager and its network must be properly configured to allow PXE booting.
+
+ Any settings changed on the Advanced Configuration page will be
+ ignored when creating a PXE installation configuration for Cobbler.
+
+ The Variables subtab can be used to create Kickstart variables, which
+ substitute values in Kickstart files. To define a variable, create a name-value pair
+ (name/value) in the text box.
+ For example, if you want to Kickstart a system that joins the network of a specific
+ organization (for instance the Engineering department) you can create a profile variable to set
+ the IP address and the gateway server address to a variable that any system using that profile
+ will use. Add the following line to the Variables text box:
+ IPADDR=192.168.0.28
+GATEWAY=192.168.0.1
+ To use the system variable, use the name of the variable in the profile instead of the
+ value. For example, the network portion of a Kickstart file could look like
+ the following:
+ network --bootproto=static --device=eth0 --onboot=on --ip=$IPADDR \
+ --gateway=$GATEWAY
+ The $IPADDR variable will be 192.168.0.28, and the
+ $GATEWAY variable will be 192.168.0.1.
+
+ There is a hierarchy when creating and using variables in Kickstart files. System
+ Kickstart variables take precedence over profile variables, which in turn take precedence
+ over distribution variables. Understanding this hierarchy can alleviate confusion when using
+ variables in Kickstarts.
+
+ Using variables is just one part of the larger Cobbler infrastructure for creating
+ templates that can be shared between multiple profiles and systems.
+
+
+
+
+
+ System Details > Provisioning
+ > Power Management
+
+ SUSE Manager allows you to power on, off, and reboot systems (either physical or bare-metal)
+ via the IPMI protocol if the systems are IPMI-enabled. You need a fully patched SUSE Manager
+ installation. To use any power management functionality, IPMI configuration details must be
+ added to SUSE Manager. First select the target system on the systems list, then select
+ Provisioning
+ Power Management
+ . On the displayed configuration page, edit all required fields (marked with a red
+ asterisk) and click Save.
+ Systems can be powered on, off, or rebooted from the configuration page via corresponding
+ buttons. Note that any configuration change is also saved in the process. The Save and
+ Get Status button can also be used to query for the system's power state. If
+ configuration details are correct, a row is displayed with the current power status ("on" or
+ "off"). If a power management operation succeeds on a system, it will also be noted in its
+ Events
+ History
+ subtab.
+ Power management functionalities can also be used from the system set manager to operate
+ on multiple systems at the same time. Specifically, you can change power management
+ configuration parameters or apply operations (power on, off, reboot) to multiple systems at
+ once:
+
+
+ Add the respective systems to the system set manager as described in .
+
+
+ Click Manage (in the upper right corner), then
+ Provisioning
+ Power Management Configuration
+ to change one or more configuration parameters for all systems in the set. Note
+ that any field left blank will not alter the configuration parameter in selected systems.
+
+
+
+ Once all configuration parameters are set correctly, click Manage,
+ then
+ Provisioning
+ Power Management Operations
+ to power on, off or reboot systems from the set. Note that the Provisioning
+ entitlement is required for non-bare metal systems.
+
+
+ To check that a power operation was executed correctly, click
+ System Set Manager
+ Status
+ on the left-hand menu, then click on the proper line in the list. This will
+ display a new list with systems to which the operation was applied. In the event of errors
+ which prevent correct execution, a brief message with an explanation will be displayed in the
+ Note column.
+ This feature uses Cobbler power management, thus a Cobbler system record is automatically
+ created at first use if it does not exist already. In that case, the automatically created
+ system record will not be bootable from the network and will reference a dummy image. This is
+ needed because Cobbler does not currently support system records without profiles or images.
+ The current implementation of Cobbler power management uses the fence-agent tools to support
+ multiple protocols besides IPMI. Those are not supported by SUSE Manager but can be used by
+ adding the fence agent names as a comma-separated list to the
+ java.power_management.types configuration parameter.
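+ A minimal sketch of such a setting, assuming the default configuration file location
+ /etc/rhn/rhn.conf and purely illustrative fence agent names; restarting the SUSE Manager
+ services (for example with spacewalk-service restart) is assumed to be necessary for the
+ change to take effect:
+ # /etc/rhn/rhn.conf (agent names are illustrative)
+ java.power_management.types = ipmitool,ilo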
+
+
+
+ System Details > Provisioning
+ > Snapshots
+
+ 2015-01-28 - ke: feedback by Silvio is in: bsc#906851.
+ Snapshots enable you to roll back the system's package profile, configuration files, and
+ SUSE Manager settings. Snapshots are always captured automatically after an action takes place.
+ The Snapshots subtab lists all snapshots for the system, including the
+ reason why the snapshot was taken, the time it was taken, and the number of tags applied to
+ each snapshot.
+
+ Technical Details
+
+
+
+ A snapshot is always taken after a successful operation and not
+ before, as you might expect. One consequence of snapshots being taken after the
+ action is that, if you want to undo action number X, you must roll back to snapshot
+ number X-1.
+
+
+ It is possible to disable snapshotting globally (in rhn.conf set
+ enable_snapshots = 0, as sketched after this list), but it is enabled by default. No further
+ fine-tuning is possible.
+
+
+
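+ As referenced in the note above, a minimal sketch of disabling snapshots, assuming the
+ default configuration file location /etc/rhn/rhn.conf and that a restart of the SUSE Manager
+ services is required for the change to take effect:
+ # /etc/rhn/rhn.conf
+ enable_snapshots = 0
+ # restart the services afterwards, for example:
+ spacewalk-service restart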
+ To revert to a previous configuration, click the Reason for the
+ snapshot and review the potential changes on the provided subtabs, starting with
+ Rollback.
+
+ Unsupported Rollback Scenarios
+ Snapshot rollbacks support reverting certain changes
+ to the system, but not in every scenario. For example, you can roll back a set of RPM
+ packages, but rolling back across multiple update levels is not supported.
+ Rolling back an SP migration is also not supported.
+
+ Each subtab provides the specific changes that will be made to the system during the
+ rollback:
+
+
+ group memberships,
+
+
+ channel subscriptions,
+
+
+ installed packages,
+
+
+ configuration channel subscriptions,
+
+
+ configuration files,
+
+
+ snapshot tags.
+
+
+ When satisfied with the reversion, return to the Rollback subtab and
+ click the Rollback to Snapshot button. To see the list again, click
+ Return to snapshot list.
+
+
+ Background Information About Snapshots
+ There is no maximum number of snapshots that SUSE Manager will keep; thus, related database
+ tables will grow with system count, package count, channel count, and the number of
+ configuration changes over time. Installations with more than a thousand systems should
+ consider setting up a recurring cleanup script via the API or disabling this feature
+ altogether.
+
+ There is currently no integrated support for rotated snapshots.
+
+
+
+
+
+
+ Snapshot rollback is scheduled like any other action; this means the rollback usually
+ does not happen immediately.
+
+
+
+ System Details > Provisioning
+ > Snapshot Tags
+
+ Snapshot tags provide a means to add meaningful descriptions to your most recent system
+ snapshot. This can be used to indicate milestones, such as a known working configuration or a
+ successful upgrade. To tag the most recent snapshot, click Create System
+ Tag, enter a descriptive term in the Tag name field, and click the
+ Tag Current Snapshot button. You may then revert using this tag directly by
+ clicking its name in the Snapshot Tags list. To delete tags, select their check boxes, click
+ Remove Tags, and confirm the action.
+
+
+
+
+
+
+ System Details > Groups
+
+
+ SUSE Manager Administrator
+ assigning and removing
+
+ SUSE Manager Administrator
+ joining and leaving
+ The Groups tab and its subtabs allow you to manage the system's group
+ memberships.
+
+ System Details > Groups
+ > List/Leave
+
+ This subtab lists groups to which the system belongs and enables you to cancel
+ membership. Only System Group Administrators and SUSE Manager Administrators can remove systems
+ from groups. Non-admins just see a Review this system's group membership
+ page. To remove the system from one or more groups, select the respective check boxes of these
+ groups and click the Leave Selected Groups button. To see the
+ System Group Details page, click on the group's name. Refer to for more information.
+
+
+ System Details > Groups
+ > Join
+
+ Lists groups that the system can be subscribed to. Only System Group Administrators and
+ SUSE Manager Administrators can add a system to groups. Non-admins see a Review this
+ system's group membership page. To add the system to groups, select the groups'
+ check boxes and click the Join Selected Groups button.
+
+
+
+
+ System Details > Audit
+
+ Via the Audit tab, view OpenSCAP scan results or schedule scans. For
+ more information on auditing and OpenSCAP, refer to .
+
+
+
+
+ System Details > Events
+
+ Displays past, current, and scheduled actions on the system. You may cancel pending events
+ here. The following sections describe the Events subtabs and the features
+ they offer.
+
+ System Details > Events
+ > Pending
+ Lists events that are scheduled but have not started. A prerequisite action must complete
+ successfully before the given action is attempted. If an action has a prerequisite, no check
+ box is available to cancel that action. Instead, a check box appears next to the prerequisite
+ action; canceling the prerequisite action causes the action in question to fail.
+ Actions can be chained so that action 'a' requires action 'b' which requires action 'c'.
+ Action 'c' is performed first and has a check box next to it until it is completed
+ successfully. If any action in the chain fails, the remaining actions also fail. To unschedule
+ a pending event, select the event and click the Cancel Events button at the
+ bottom of the page. The following icons indicate the type of events:
+
+
+
+
+
+
+
+
+
+
+ — Package Event,
+
+
+
+
+
+
+
+
+
+ — Patch Event,
+
+
+
+
+
+
+
+
+
+ — Preferences Event,
+
+
+
+
+
+
+
+
+
+ — System Event.
+
+
+
+
+
+ System Details > Events
+ > History
+ The default display of the Events tab lists the type and status of
+ events that have failed, occurred or are occurring. To view details of an event, click its
+ summary in the System History list. To go back to the table again, click
+ Return to history list at the bottom of the page.
+
+
+
+
+
+ System Details > States
+
+
+ system states
+
+ SUSE Manager Administrator
+ System states
+
+
+ Web Interface
+ System States Packages
+
+ Web Interface
+ System States Custom
+
+
+ Web Interface
+ System States Highstate
+
+ Overview of states tabs.
+
+ System Details > States
+ > Packages
+ Search and install packages and assign them a pre-defined state for this machine. After
+ searching for a specific package, for example Vim, select the drop-down and choose Unmanaged,
+ Installed or Removed. Next select Latest or Any from the drop-down. Latest
+ applies the latest package version available while Any applies the package
+ version required to fulfil dependencies. Click the Save button to save
+ changes to the database, then click Apply to apply the new package
+ state.
+
+
+ System Details > States
+ > Custom
+ States which have been created on the States Catalog page located under
+ the Salt tab on the main navigation bar may be assigned to a system on the
+ States
+ Custom
+ page. Search for the custom state you wish to apply to the system, then select the
+ Assign check box. Click Save to save the change to the
+ database; finally, select Apply to apply the changes. States applied at the
+ system level will only be applied to the selected system.
+
+
+
+ System Details > States
+ > Highstate
+ From the Highstate page you can view and apply the Highstate for a
+ selected system. Select a date and time to apply the Highstate, then click Apply
+ Highstate.
+
+
+
+
+
+
+ System Groups
+
+
+ system group
+
+ SUSE Manager Administrator
+ list of
+
+ SUSE Manager Administrator
+ status
+
+ SUSE Manager Administrator
+ System Group List
+
+ Web Interface
+ System Group List
+
+ Web Interface
+ System Groups
+
+
+ The System Groups page allows SUSE Manager users to view the
+ System Groups list. Only System Group Administrators and SUSE Manager
+ Administrators may perform the following additional tasks:
+
+
+
+ Create system groups. (Refer to .)
+
+
+
+ Add systems to system groups. (Refer to .)
+
+
+ Remove systems from system groups. (Refer to .)
+
+
+
+ Assign system group permissions to users. (Refer to .)
+
+
+
+
+ The System Groups list displays all system groups. The list contains
+ several columns for each group:
+
+
+
+
+ Select — Via the check boxes add all systems in the selected groups to the
+ System Set Manager by clicking the Update button. All
+ systems in the selected groups are added to the System Set Manager. You can
+ then use the System Set Manager to perform actions on them simultaneously.
+ It is possible to select only those systems that are members of all of the selected groups,
+ excluding those systems that belong only to one or some of the selected groups. To do so,
+ select the relevant groups and click the Work with Intersection button. To
+ add all systems of all selected groups, click the Work with Union button.
+ Each system will show up once, regardless of the number of groups to which it belongs. Refer to
+ for details.
+
+
+
+ Updates — Shows which type of patch alerts are applicable to the group or
+ confirms that all systems are up-to-date. Clicking on a group's status icon takes you to the
+ Patch tab of its System Group Details page. Refer to
+ for more information.
+ The status icons call for differing degrees of attention:
+
+
+
+
+
+ Check Circle
+
+
+
+
+
+
+
+ — All systems in the group are up-to-date.
+
+
+
+
+
+ Exclamation Circle
+
+
+
+
+
+
+
+ — Critical patches available, update strongly
+ recommended.
+
+
+
+
+
+ Warning
+
+
+
+
+
+
+
+ — Updates available and recommended.
+
+
+
+
+
+ Health - Status of the systems in the group, reported by probes.
+
+
+
+ Group Name — The name of the group as configured during its creation. The
+ name should be explicit enough to distinguish from other groups. Clicking on the name of a
+ group takes you to the Details tab of its System Group
+ Details page. Refer to for
+ more information.
+
+
+
+ Systems — Total number of systems in the group. Clicking on the number takes
+ you to the Systems tab of the System Group Details page
+ for the group. Refer to for more
+ information.
+
+
+
+ Use in SSM — Clicking the Use in SSM link in this column
+ loads all and only the systems in the selected group and launches the System Set
+ Manager immediately. Refer to for more
+ information.
+
+
+
+
+
+ Creating Groups
+
+ SUSE Manager Administrator
+ creating
+ To add a new system group, click the Create Group link at the top-right
+ corner of the page. Type a name and description and click the Create Group
+ button. Make sure you use a name that clearly sets this group apart from others. The new group
+ will appear in the System Groups list.
+
+
+
+
+ Adding and Removing Systems in Groups
+
+ SUSE Manager Administrator
+ adding and removing
+ Systems can be added and removed from system groups. Clicking on the group name takes you
+ to the Details page. The Systems tab shows all systems in
+ the group and allows you to select some or all systems for deletion. Click on Remove
+ Systems to remove the selected systems from the group. The Target
+ Systems page shows you all systems that can be added to the group. Select the systems
+ and click the Add Systems button.
+
+
+
+
+ System Group Details
+
+ SUSE Manager Administrator
+ viewing details
+ At the top of each System Group Details page are two links:
+ Work With Group and Delete Group. Clicking
+ Delete Group deletes the System Group and should be used with caution.
+ Clicking Work With Group loads the group's systems and launches the
+ System Set Manager immediately just like the Use Group
+ button from the System Groups list. Refer to for more information.
+ The System Group Details page is split into the following tabs:
+
+ System Group Details > Details
+
+
+
+ SUSE Manager Administrator
+ deleting
+
+ SUSE Manager Administrator
+ editing details
+ Provides the group name and group description. To change this information, click
+ Edit These Properties, make your changes in the appropriate fields, and
+ click the Update Group button.
+
+
+ System Group Details > Systems
+
+
+ Lists all members of the system group. Clicking links within the table takes you to
+ corresponding tabs within the System Details page for the associated system.
+ To remove systems from the group, select the appropriate check boxes and click the
+ Remove Systems button on the bottom of the page. Clicking it does not
+ delete systems from SUSE Manager entirely. This is done through the System Set
+ Manager or System Details pages. Refer to or , respectively.
+
+
+
+ System Group Details > Target Systems
+
+
+
+ Target Systems — Lists all systems in your organization. To add systems to
+ the specified system group, click the check boxes to their left and click the Add
+ Systems button on the bottom right-hand corner of the page.
+
+
+ System Group Details > Patches
+
+
+
+ List of relevant patches for systems in the system group. Clicking the advisory takes you
+ to the Details tab of the Patch Details page. (Refer to
+ for more information.) Clicking the Affected Systems
+ number lists all of the systems affected by the patch. To apply the patch updates in this list,
+ select the systems and click the Apply Patches button.
+
+
+ System Group Details > Admins
+
+
+ List of all organization users that have permission to manage the system group. SUSE Manager
+ Administrators are clearly identified. System Group Administrators are marked with an asterisk
+ (*). To change the system group's users, select and deselect the appropriate check boxes and
+ click the Update button.
+
+
+
+ System Group Details > States
+
+
+ The States tab displays states which have been created and added using
+ the Salt > State Catalog. From this page you can
+ select which states should be applied across a group of systems. A state applied from this page
+ will be applied to all minions within a group.
+
+ Keep in mind that states are applied according to the following order of hierarchy within SUSE
+ Manager: Organization > Group > Single
+ System.
+
+
+
+ Applying States at the Group Level
+
+ Create a state using the Salt > State Catalog
+ or via the command line.
+
+
+ Browse to Systems > System Groups. Select the
+ group that a new state should be applied to. From a specific group page select the
+ States tab.
+
+
+ Use the search feature to locate a state by name, or click the Search
+ button to list all available states.
+
+
+ Select the checkbox for the state to be applied and click the Save
+ button. The save button will save the change to the database but will not apply the
+ state.
+
+
+ Apply the state by clicking the Apply button. The state will be
+ scheduled and applied to all systems included within the group.
+
+
+
+
+
+
+
+
+ System Set Manager
+
+
+ System Set Manager
+
+
+ The following actions executed on individual systems from the System Details page may be
+ performed for multiple systems via the System Set Manager. The System Set Manager can be used to
+ schedule actions on both Salt and Traditional systems. The following table provides information
+ on what actions may be performed across both Salt and Traditional systems. These two methods have
+ different actions which may be accessed with the System Set Manager:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ System Set Manager: Overview
+
+
+ Traditional SSM
+
+
+ Salt SSM
+
+
+
+
+
+
+
+
+ Systems:
+
+
+ List Systems
+
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+
+
+
+ Install Patches:
+
+
+ Schedule Patch Updates
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+
+
+
+ Install Packages:
+
+
+ Upgrade
+
+
+ Install
+
+
+ Remove
+
+
+ Verify
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Limited
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Not Available
+
+
+
+
+
+
+
+
+
+ Groups:
+
+
+ Create
+
+
+ Manage
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+
+
+
+ Channels:
+
+
+ Channel Memberships
+
+
+ Channel Subscriptions
+
+
+ Deploy / Diff Channels
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Limited
+
+
+ Supported
+
+
+ Not Available
+
+
+ Not Available
+
+
+
+
+
+
+
+ Provisioning:
+
+
+ Autoinstall Systems
+
+
+ Tag for Snapshot
+
+
+ Remote Commands
+
+
+ Power Management
+
+
+ Power Management Operations
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Not Available
+
+
+
+
+
+ Misc:
+
+
+ Update Hardware Profiles
+
+
+ Update Package Profiles
+
+
+ Update System Preferences
+
+
+ Set/Remove Custom Values
+
+
+ Add/Remove Add-on Types
+
+
+ Delete Systems
+
+
+ Reboot Systems
+
+
+ Migrate Systems to another Organization
+
+
+ Lock/Unlock Systems
+
+
+ Audit Systems (OpenSCAP)
+
+
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+
+
+ Limited
+
+
+ Supported
+
+
+ Supported
+
+
+ Not Available
+
+
+ Supported
+
+
+ Not Available
+
+
+ Supported
+
+
+ Supported
+
+
+ Supported
+
+
+ Not Available
+
+
+ Not Available
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Before performing actions on multiple systems, select the systems to work with. To select
+ systems, click Systems in the left navigation bar, check the boxes to the left
+ of the systems you wish to work with, and click the Manage button in the upper
+ bar.
+
+ Additionally, you can access the System Set Manager in three different ways:
+
+
+
+ Click the System Set Manager link in the left navigation area.
+
+
+ Click the Use in SSM link in the System Groups
+ list.
+
+
+ Click the Work with Group link on the System Group
+ Details page.
+
+
+
+
+
+ System Set Manager > Overview
+
+
+
+ This page contains links to options in the remaining tabs.
+
+
+
+
+ System Set Manager > Systems
+
+
+ List of selected systems.
+
+
+
+
+
+
+ System Set Manager > Patches
+
+
+ List of patch updates applicable to the current system set. Click the number in the
+ Systems column to see to which systems in the System Set Manager a patch applies. To apply
+ updates, select the patches and click the Apply Patches button.
+
+
+
+
+ System Set Manager > Packages
+
+
+ Click the number in the Systems column to see the systems in the System Set Manager to
+ which a package applies. Modify packages on the system via the following subtabs.
+
+ System Set Manager > Packages
+ > Install
+
+ This list includes all channels to which systems in the set are subscribed. A package is
+ only installed on a system if the system is subscribed to the channel providing the package.
+ Click on the channel name and select the packages from the list. Then click the
+ Install Packages button.
+
+
+ System Set Manager > Packages
+ > Remove
+
+ A list of all the packages installed on the selected systems that might be removed.
+ Multiple versions appear if systems in the System Set Manager have more than one version
+ installed. Select the packages to be deleted, then click the Remove Packages
+ button.
+
+
+ System Set Manager > Packages
+ > Upgrade
+
+ A list of all the packages installed on the selected systems that might be upgraded.
+ Systems must be subscribed to a channel providing the packages to be upgraded. If multiple
+ versions of a package are available, note that your system will be upgraded to the latest
+ version. Select the packages to be upgraded, then click the Upgrade Packages
+ button.
+
+
+
+ System Set Manager > Packages
+ > Verify
+
+ A list of all installed packages whose contents, file checksum, and other details may be
+ verified. At the next check in, the verify event issues the command rpm
+ --verify for the specified package. If there are any discrepancies, they are
+ displayed in the System Details page for each system.
+ Select the check box next to all packages to be verified, then click the Verify
+ Packages button. On the next page, select a date and time for the verification, then
+ click the Schedule Verifications button.
+
+
+
+
+
+
+ System Set Manager > Groups
+
+
+ Tools to create groups and manage membership. These functions are limited to SUSE Manager
+ Administrators and System Group Administrators. To add a new group, click Create
+ Group on the top-right corner. In the next page, type the group name and description
+ in the respective fields and click the Create Group button. To add or remove
+ selected systems in any of the system groups, toggle the appropriate radio buttons and click the
+ Alter Membership button.
+
+
+
+
+ System Set Manager > Channels
+
+
+ Manage channel associations through the following subtabs:
+
+ System Set Manager > Channels
+ > Base Channels
+
+ As a Channel Administrator, you may change the base channels your systems are subscribed
+ to. Valid channels are either channels created by your organization, or the vendor's default
+ base channel for your operating system version and processor type. Systems will be unsubscribed
+ from all channels, and subscribed to their new base channels.
+
+ Changing Base Channel
+ This operation can have a dramatic effect on the packages and patches available to the
+ systems. Use with caution.
+
+ If you want to change the base channel, select the new one from the Desired base
+ Channel and confirm the action.
+
+
+ System Set Manager > Channels
+ > Child Channels
+
+ To subscribe or unsubscribe selected systems to any of the channels, toggle the
+ appropriate check boxes and click the Alter Subscriptions button. Keep in
+ mind that subscribing to a channel uses a channel entitlement for each system in the selected
+ group. If too few entitlements are available, some systems fail to subscribe. Systems must
+ subscribe to a base channel before subscribing to a child channel.
+
+
+
+
+
+ System Set Manager > Configuration
+
+
+ Like in the
+ System Details
+ Channels
+ Configuration
+ tab, the subtabs here can be used to subscribe the selected systems to
+ configuration channels and deploy and compare the configuration files on the systems. The
+ channels are created in the Manage Config Channels interface within the
+ Channels category. Refer to for
+ channel creation instructions.
+ To manage the configuration of a system, install the latest rhncfg*
+ packages. Refer to for instructions on enabling and
+ disabling scheduled actions for a system.
+
+
+ System Set Manager > Configuration
+ > Deploy Files
+
+ Use this subtab to distribute configuration files from your central repository on
+ SUSE Manager to each of the selected systems. The table lists the configuration files associated
+ with any of the selected systems. Clicking its system count displays the systems already
+ subscribed to the file.
+ To subscribe the selected systems to the available configuration files, select the check
+ box for each desired file. When done, click Deploy Configuration and
+ schedule the action. Note that the latest versions of the files, at the time of scheduling, are
+ deployed. Newer versions created after scheduling are disregarded.
+
+
+
+ System Set Manager > Configuration
+ > Compare Files
+
+ Use this subtab to validate configuration files on the selected systems against copies in
+ your central repository on SUSE Manager. The table lists the configuration files associated with
+ any of the selected systems. Clicking a file's system count displays the systems already
+ subscribed to the file.
+ To compare the configuration files deployed on the systems with those in SUSE Manager,
+ select the check box for each file to be validated. Then click
+ Analyze Differences
+ Schedule File Comparison
+ . The comparisons for each system will not complete until each system checks in to
+ SUSE Manager. Once each comparison is complete, any differences between the files will be
+ accessible from each system's events page.
+ Note that the latest versions of the files, at the time of scheduling, are compared.
+ Newer versions created after scheduling are disregarded. Find the results in the main
+ Schedule category or within the
+ System Details
+ Events
+ tab.
+
+
+
+ System Set Manager > Configuration
+ > Subscribe to Channels
+
+
+ Subscribe systems to configuration channels, and in a second step
+ rank these channels according to the order of preference. This
+ tab is available only to SUSE Manager Administrators and Configuration
+ Administrators.
+
+
+
+
+ Select channels for subscription by activating the checkbox.
+ Once done, confirm with Continue.
+
+
+
+
+ In the second step, rank the channels with the arrow-up or
+ arrow-down symbols.
+
+
+ Then decide how the channels are applied to the selected systems.
+ The three buttons below the channels reflect your
+ options. Clicking Subscribe with Highest
+ Priority places all the ranked channels before any other
+ channels to which the selected systems are currently
+ subscribed. Clicking Subscribe With Lowest
+ Priority places the ranked channels after those channels
+ to which the selected systems are currently subscribed. Clicking
+ Replace Existing Subscriptions removes any
+ existing association and creates new ones with the ranked
+ channels, leaving every system with the same config channels in
+ the same order.
+
+
+ Conflicting Ranks
+
+ In the first two cases, if any of the newly ranked configuration
+ channels are already in a system's existing config channel list,
+ the duplicate channel is removed and replaced according to the
+ new rank, effectively reordering the system's existing
+ channels. When such conflicts exist, you are presented with a
+ confirmation page to ensure the intended action is correct.
+ When the change has taken place, a message appears at the top of
+ the page indicating the update was successful.
+
+
+
+ Then, click Apply Subscriptions.
+
+
+
+
+ Channels are accessed in the order of their rank. Your local
+ configuration channel always overrides all other channels.
+
+
+
+
+ System Set Manager > Configuration
+ > Unsubscribe from Channels
+
+ Administrators may unsubscribe systems from configuration channels by clicking the check
+ box next to the channel name and clicking the Unsubscribe Systems button.
+
+
+
+
+ System Set Manager > Configuration
+ > Enable Configuration
+
+
+
+ Registered systems without configuration management preparation
+ will appear here in a list.
+
+
+
+
+ Administrators may enable configuration management by clicking
+ the Enable SUSE Manager Configuration Management
+ button.
+
+ You can also schedule the action by adjusting the Schedule
+ no sooner than date and time setting using the drop-down,
+ then clicking Enable SUSE Manager Configuration
+ Management.
+
+
+ The systems will then be subscribed to the required SUSE Manager
+ Tools channel, and the required rhncfg* packages will
+ be installed.
+
+
+
+
+
+
+
+ System Set Manager > Provisioning
+
+
+ Set the options for provisioning systems via the following subtabs.
+
+
+ System Set Manager > Provisioning
+ > Autoinstallation
+
+
+ Use this subtab to reinstall clients. To schedule autoinstallations for these systems,
+ select a distribution. The autoinstallation profile used for each system in the set is
+ determined via the Autoinstallable Type radio buttons.
+ Choose Select autoinstallation profile if you want to apply the same
+ profile to all systems in the set. This is the default option. You will see a list of available
+ profiles to select from once you click on Continue.
+ Choose Autoinstall by IP Address if you want to apply different
+ autoinstallation profiles to different systems in the set, by IP address. To do so, at least
+ two autoinstallation profiles must be configured with associated IP ranges.
+
+
+ If you use Autoinstall by IP Address, SUSE Manager will automatically
+ pick a profile for each system so that the system's IP address will be in one of the IP ranges
+ specified in the profile itself. If such a profile cannot be found, SUSE Manager will look for an
+ organization default profile and apply that instead.
+
+ If neither a matching IP range nor an
+ organization default profile can be found, no autoinstallation will be performed on the
+ system. You will be notified on the next page if that happens.
+ To use Cobbler system records for autoinstallation, select Create PXE
+ Installation Configuration. With PXE boot, you can not only reinstall clients, but
+ also automatically install machines that do not yet have an operating system installed. SUSE Manager and
+ its network must be properly configured to enable PXE booting.
+
+
+
+ If a system set contains bare-metal systems and installed clients, only features working
+ for systems without an operating system installed will be available. Full features will be
+ enabled again once all bare-metal systems are removed from the set.
+
+ If any of the systems connect to SUSE Manager via a proxy server, choose either the
+ Preserve Existing Configuration radio button or the Use
+ Proxy radio button. If you choose to autoinstall through a proxy server, select from
+ the available proxies listed in the drop-down box beside the Use Proxy radio
+ button. All of the selected systems will autoinstall via the selected proxy. Click the
+ Schedule Autoinstall button to confirm your selections. When the
+ autoinstallations for the selected systems are successfully scheduled, you will return to the
+ System Set Manager page.
+
+
+
+ System Set Manager > Provisioning
+ > Tag Systems
+
+ Use this subtab to add meaningful descriptions to the most recent snapshots of your
+ selected systems. To tag the most recent system snapshots, enter a descriptive term in the
+ Tag name field and click the Tag Current Snapshots
+ button.
+
+
+
+ System Set Manager > Provisioning
+ > Rollback
+
+ Use this subtab to roll back selected Provisioning-entitled systems to previous snapshots
+ marked with a tag. Click the tag name, verify the systems to be reverted, and click the
+ Rollback Systems button.
+
+
+
+ System Set Manager > Provisioning
+ > Remote Command
+
+ Use this subtab to issue remote commands on selected Provisioning-entitled systems. First
+ create a run file on the client systems to allow this function to operate.
+ Refer to for instructions. Then identify a
+ specific user, group, timeout period, and the script to run. Select a date and time to execute
+ the command and click Schedule.
+
+
+
+ System Set Manager > Provisioning
+ > Power Management Configuration
+
+
+
+
+
+ System Set Manager > Provisioning
+ > Power Management Operation
+
+
+
+
+
+
+
+ System Set Manager > Audit
+
+
+ System sets can be scheduled for XCCDF scans; XCCDF stands for
+ The Extensible Configuration Checklist Description
+ Format. Enter the command and command-line arguments, as well
+ as the path to the XCCDF document, then schedule the scan. All target
+ systems are listed below with a flag indicating whether they support
+ OpenSCAP scans. For more details on OpenSCAP and audits, refer to .
+
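+ As an illustration only (the profile name and the XCCDF document path
+ below are assumptions that depend on the SCAP content installed on the
+ clients), the values entered in the scan form correspond roughly to
+ running a command such as:
+
+oscap xccdf eval --profile Default \
+ /usr/share/openscap/scap-yast2sec-xccdf.xml
+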
+
+
+
+ System Set Manager > Misc
+
+ On the Misc page, you can modify Custom System
+ Information. Click Set a custom value for selected systems, then
+ the name of a key. Enter values for all selected systems, then click the Set
+ Values button. To remove values for all selected systems, click Remove a
+ custom value from selected systems, then the name of the key. Click the
+ Remove Values button to delete.
+ Set System Preferences via the respective radio buttons.
+
+ System Set Manager > Misc
+ > Hardware
+
+ Click on the Hardware subtab to schedule a hardware profile refresh.
+ Click Confirm Refresh.
+
+
+ System Set Manager > Misc
+ > Software
+
+ Click the Software subtab, then the Confirm Refresh
+ button to schedule a package profile update of the selected systems.
+
+
+ System Set Manager > Misc
+ > Migrate
+ Click the Migrate subtab to move selected systems to a selected
+ organization.
+
+
+ System Set Manager > Misc
+ > Lock/Unlock
+
+ Select the Lock/Unlock subtab to select systems to be excluded from
+ package updates. Enter a Lock reason in the text field and click the
+ Lock button. Already locked systems can be unlocked on this page. Select
+ them and click Unlock.
+
+
+ System Set Manager > Misc
+ > Delete
+
+
+ Click the Delete subtab to remove systems by deleting their system
+ profiles. Click the Confirm Deletion button to remove the selected profiles
+ permanently.
+
+
+
+
+
+ System Set Manager > Misc
+ > Reboot
+
+
+ Select the appropriate systems, then click the Reboot
+ Systems link to select these systems for reboot.
+
+
+ To cancel a reboot action, see .
+
+
+
+
+
+
+ Advanced Search
+
+
+ SUSE Manager Administrator
+ searching
+
+
+ SUSE Manager Administrator
+ Advanced Search
+
+
+ Web Interface
+ System Search
+
+
+
+ Carry out an Advanced Search on your systems
+ according to the following criteria: network info, hardware devices,
+ location, activity, packages, details, DMI info, and hardware.
+
+
+
+ Refine searches using the Field to Search drop-down menu, which is set
+ to Name/Description by default.
+
+
+
+
+
+ The Activity selections (Days Since Last Check-in, for instance) are
+ useful in finding and removing outdated system profiles.
+
+ Type the keyword, select the criterion to search by, use the radio buttons to specify
+ whether you wish to query all systems or only those in the System Set Manager,
+ and click the Search button. To list all systems that do
+ not match the criteria, select the Invert Result check
+ box.
+
+ The results appear at the bottom of the page. For details on how to use the resulting
+ system list, refer to .
+
+
+
+ Activation Keys
+
+
+ activation key
+
+ email address
+ with activation key
+
+ reactivating
+ with activation key
+
+ rhnreg_ks
+
+ Web Interface
+ activation keys
+
+ Users with the Activation Key Administrator role (including SUSE Manager Administrators) can
+ generate activation keys in the SUSE Manager Web interface. With such an activation key, register a
+ SUSE Linux Enterprise or Red Hat Enterprise Linux system, entitle the system to a SUSE Manager service level and
+ subscribe the system to specific channels and system groups through the
+ rhnreg_ks command line utility.
+
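+ For example, a minimal sketch of such a registration (the server URL and
+ the key name are placeholders):
+
+rhnreg_ks --serverUrl=https://manager.example.com/XMLRPC \
+ --activationkey=1-my-example-key
+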
+
+ System-specific activation keys created through the Reactivation subtab
+ of the System Details page are not part of this list because they are not
+ reusable across systems.
+
+
+ For more information about Activation Keys, see “Activation Key Management” (↑Best Practices).
+
+
+ Managing Activation Keys
+
+ account
+ deleting
+
+ account
+ disabling
+
+ account
+ editing
+
+ account
+ creating
+ To create an activation key:
+
+ Creating Activation Keys
+
+ Select Systems from the top navigation bar then Activation
+ Keys from the left navigation bar.
+
+
+ Click the Create Key link at the upper right corner.
+
+
+
+ Description — Enter a Description to identify the
+ generated activation key.
+
+
+
+ Key — Either choose automatic generation by leaving this field blank or
+ enter the key you want to generate in the Key field. This string of
+ characters can then be used with rhnreg_ks to register client systems with
+ SUSE Manager. Refer to for details.
+
+ Allowed Characters
+
+ Do not insert commas or double quotes in the key. All
+ other characters are allowed, but <> (){} (this includes the space)
+ will get removed automatically. If the string is empty, a random one is generated.
+ Commas are problematic because they are used as a separator when two or more activation
+ keys are used at once.
+
+
+
+
+ Usage — The maximum number of systems that can be registered with the
+ activation key concurrently. Leave blank for unlimited use. Deleting a system profile reduces
+ the usage count by one and registering a system profile with the key increases the usage count
+ by one.
+
+
+
+ Base Channels — The primary channel for the key. This can be either the
+ SUSE Manager Default channel, a SUSE provided channel, or a custom base
+ channel.
+ Selecting SUSE Manager Default allows client systems to register with
+ the SUSE-provided default channel that corresponds with their installed version of SUSE Linux Enterprise.
+ You can also associate the key with a custom base channel. If a system using this key is not
+ compatible with the selected channel, it will fall back to the SUSE Manager default channel.
+
+
+
+
+ Add-on System Types — The supplemental system types for the key, e.g.
+ Virtualization Host. All systems will receive these system types with the key.
+
+
+
+ Contact Method - Select how clients communicate with SUSE Manager.
+ Pull waits for the client to check in. With Push via
+ SSH and Push via SSH tunnel the server contacts the client via
+ SSH (with or without tunnel) and pushes updates and actions, etc.
+ For more information about contact methods, see “Contact Methods” (↑Best Practices).
+
+
+
+
+ Universal Default — Select whether or not this key should be considered the
+ primary activation key for your organization.
+
+ Changing the Default Activation Key
+ Only one universal default activation key can be defined per organization. If a
+ universal key already exists for this organization, you will unset the currently used
+ universal key by activating the check box.
+
+
+
+ Click Create Activation Key.
+
+
+ To create more activation keys, repeat the steps above.
+
+ After creating the unique key, it appears in the list of activation keys along with the
+ number of times it has been used (see ). Only
+ Activation Key Administrators can see this list. At this point, you can configure the key
+ further, for example, associate the key with child channels (e.g., the Tools child channel),
+ packages (e.g., the rhncfg-actions package) and groups. Systems
+ registered with the key get automatically subscribed to them.
+ To change the settings of a key, click the key's description in the list to display its
+ Details page (see ). Via
+ additional tabs you can select child channels, packages, configuration channels, group
+ membership and view activated systems. Modify the appropriate tab then click the Update
+ Activation Key button. To disassociate channels and groups from a key, deselect them
+ in the respective menus by Ctrl-clicking their highlighted names. To remove a
+ key entirely, click the Delete Key link in the upper right corner of the
+ Details page. In the upper right corner find also the Clone
+ Key link.
+
+
+ Any (client tools) package installation requires that the Client Tools channel is
+ available and the Provisioning checkbox is selected. The Client Tools channel
+ should be selected in the Child Channels tab.
+
+ After creating the activation key, you can see in the
+ Details tab a checkbox named
+ Configuration File Deployment. If you select it,
+ all needed packages are automatically added to the
+ Packages list. By default, the following
+ packages are added: rhncfg,
+ rhncfg-client, and
+ rhncfg-actions.
+
+ If you select Virtualization Host you automatically get the following
+ package: rhn-virtualization-host.
+
+
+ Adding the osad package makes sense if you want to execute
+ scheduled actions immediately after the scheduled time. When the
+ activation key is created, you can add packages by selecting
+ the key (SoftwareActivation
+ Keys), then on the activation key details
+ tab, go to the Packages subtab and add
+ osad.
+
+
+
+
+ To disable system activations with a key, uncheck the corresponding
+ box in the Enabled column in the key list. The
+ key can be re-enabled by selecting the check box. Click the
+ Update Activation Keys button on the bottom
+ right-hand corner of the page to apply your changes.
+
+
+
+
+ Using Multiple Activation Keys at Once
+
+
+ account
+ multiple use
+
+
+ Multiple activation keys can be specified at the command-line or in
+ a single autoinstallation profile. This allows you to aggregate the
+ aspects of various keys without recreating a specific key for every
+ system that you want to register, simplifying the registration and
+ autoinstallation processes while slowing the growth of your key
+ list. Separate keys with a comma at the command line with
+ rhnreg_ks or in a Kickstart profile in the
+ Activation Keys tab of the
+ Autoinstallation Details page.
+
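+ For example (the key names are placeholders), separate the keys with a
+ comma and no spaces:
+
+rhnreg_ks --serverUrl=https://manager.example.com/XMLRPC \
+ --activationkey=1-base-sles,1-webserver-group
+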
+
+ Registering with multiple activation keys requires some caution.
+ Conflicts between some values cause registration to fail. Conflicts
+ in the following values do not cause registration to fail; instead, a
+ combination of the values is applied: software packages, software child
+ channels, and configuration channels. Conflicts in the remaining
+ properties are resolved in the following manner:
+
+
+
+ Base software channels: registration fails.
+
+
+ System types: registration fails.
+
+
+ Enable configuration flag: configuration management is set.
+
+
+ Do not use system-specific activation keys along with other activation keys; registration
+ fails in this event.
+
+
+ You are now ready to use multiple activation keys at once.
+
+
+
+
+
+ Stored Profiles
+
+
+ Web Interface
+ stored profiles
+
+ SUSE Manager Provisioning customers can create package profiles via the System
+ Details page. Under
+ Software
+ Packages
+ Profiles
+ , click on Create System Profile. Enter a Profile
+ Name and Profile Description, then click Create
+ Profile. These profiles are displayed on the Stored Profiles page
+ (left navigation bar), where they can be edited or deleted.
+
+
+
+ To edit a profile, click its name in the list, alter its name or
+ description, and click the Update button. To view
+ software associated with the profile, click the
+ Packages subtab. To remove the profile entirely,
+ click Delete Profile at the upper-right corner of
+ the page.
+
+
+ For more information about stored profiles, see in
+
+ .
+
+
+
+
+ Custom System Info
+
+
+ Web Interface
+ custom system information
+
+ SUSE Manager customers may include completely customizable information about their systems.
+ Unlike with notes, the information here is more formal and can be searched. For instance, you may
+ decide to specify an asset tag for each system. To do so, select Custom System
+ Info from the left navigation bar and create an asset key.
+
+ Click Create Key in the upper-right corner of the page. Enter a suitable
+ label and description, such as Asset and Precise location of each
+ system, then click Create Key. The key will show up in the custom
+ info keys list.
+
+ Once the key exists, you may assign a value to it through the Custom
+ Info tab of the System Details page. Refer to for instructions.
+
+
+
+ Autoinstallation
+
+
+ Autoinstallation Types: AutoYaST and Kickstart
+
+ In the following section, AutoYaST and AutoYaST features apply to SUSE Linux Enterprise
+ client systems only. For RHEL systems, use Kickstart and Kickstart
+ features.
+
+
+
+
+ AutoYaST and Kickstart configuration files allow administrators to create
+ an environment for automating otherwise time-consuming system
+ installations, such as multiple servers or workstations. AutoYaST files
+ have to be uploaded to be managed with SUSE Manager. Kickstart files can
+ be created, modified, and managed within the SUSE Manager Web interface.
+
+
+
+ SUSE Manager also features the Cobbler installation server.
+
+
+
+
+
+ SUSE Manager provides an
+ interface for developing Kickstart and AutoYaST profiles that can be used
+ to install Red Hat Enterprise Linux or SUSE Linux Enterprise on either new or already-registered
+ systems automatically according to certain specifications.
+
+
+
+
+
+ This overview page displays the status of automated installations
+ (Kickstart and AutoYaST) on your client systems: the types and number of
+ profiles you have created and the progress of systems that are
+ scheduled to be installed using Kickstart or AutoYaST. In the upper right
+ area is the Autoinstallation Actions section, which
+ contains a series of links to management actions for your Kickstart or
+ AutoYaST profiles.
+
+ Before explaining the various automated installation options on this
+ page, the next two sections provide an introduction to AutoYaST () and Kickstart ().
+
+
+
+
+ Introduction to AutoYaST
+
+
+
+ Using AutoYaST, a system administrator can create a single file containing
+ the answers to all the questions that would normally be asked during a
+ typical installation of a SUSE Linux Enterprise system.
+
+
+ AutoYaST files can be kept on a single server system and read by
+ individual computers during the installation. This way the same AutoYaST
+ file is used to install SUSE Linux Enterprise on multiple machines.
+
+
+ The SUSE Linux Enterprise Server AutoYaST ()
+ contains an in-depth discussion of Automated
+ Installation using AutoYaST.
+
+
+
+ AutoYaST Explained
+ AutoYaST
+ explained
+
+ When a machine is to receive a network-based AutoYaST installation, the
+ following events must occur in this order:
+
+
+
+
+ After being connected to the network and turned on, the machine's PXE
+ logic broadcasts its MAC address and requests to be discovered.
+
+
+
+
+ If no static IP address is used, the DHCP server recognizes the
+ discovery request and offers network information needed for the new
+ machine to boot. This includes an IP address, the default gateway to
+ be used, the netmask of the network, the IP address of the TFTP or
+ HTTP server holding the bootloader program, and the full path and file
+ name to that program (relative to the server's root).
+
+
+
+
+ The machine applies the networking information and initiates a session
+ with the server to request the bootloader program.
+
+
+
+
+ The bootloader searches for its configuration file on the server from
+ which it was loaded. This file dictates which Kernel and Kernel
+ options, such as the initial RAM disk (initrd) image, should be
+ executed on the booting machine. Assuming the bootloader program is
+ SYSLINUX, this file is located in the
+ pxelinux.cfg directory on the server and named
+ the hexadecimal equivalent of the new machine's IP address. For
+ example, a bootloader configuration file for SUSE Linux Enterprise Server should contain:
+
+port 0
+prompt 0
+timeout 1
+default autoyast
+label autoyast
+ kernel vmlinuz
+ append autoyast=http://my_susemanager_server/path \
+ install=http://my_susemanager_server/repo_tree
+
+
+
+ The machine accepts and uncompresses the initrd and kernel, boots the
+ kernel, fetches the instsys from the install server and initiates the
+ AutoYaST installation with the options supplied in the bootloader
+ configuration file, including the server containing the AutoYaST
+ configuration file.
+
+
+
+
+ The new machine is installed based on the parameters established
+ within the AutoYaST configuration file.
+
+
+
+
+
+ AutoYaST Prerequisites
+
+ Some preparation is required for your infrastructure to handle AutoYaST
+ installations. For instance, before creating AutoYaST profiles, you may
+ consider:
+
+
+
+
+
+
+ A DHCP server is not required for AutoYaST, but it can make things
+ easier. If you are using static IP addresses, you should select static
+ IP while developing your AutoYaST profile.
+
+
+
+
+ Host the AutoYaST distribution trees via HTTP, properly provided by
+ SUSE Manager.
+
+
+
+
+ If conducting a so-called bare-metal AutoYaST installation, you should
+ do the following:
+
+
+
+
+ Configure DHCP to assign the required networking parameters and the
+ bootloader program location.
+
+
+
+
+ In the bootloader configuration file, specify the kernel and
+ appropriate kernel options to be used.
+
+
+
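+ If you are configuring DHCP for such a bare-metal installation, a minimal
+ dhcpd.conf sketch could look as follows; the subnet, all addresses, and the
+ bootloader file name are assumptions and must be adapted to your network:
+
+subnet 192.168.0.0 netmask 255.255.255.0 {
+ range 192.168.0.100 192.168.0.200;
+ option routers 192.168.0.1;  # default gateway
+ next-server 192.168.0.5;     # TFTP server holding the bootloader
+ filename "pxelinux.0";       # bootloader program
+}
+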
+
+
+
+
+
+ Building Bootable AutoYaST ISOs
+
+
+ While you can schedule a registered system to be installed by AutoYaST
+ with a new operating system and package profile, you can also
+ automatically install a system that is not registered with SUSE Manager,
+ or does not yet have an operating system installed. One common method of
+ doing this is to create a bootable CD-ROM that is inserted into the
+ target system. When the system is rebooted or switched on, it boots from
+ the CD-ROM, loads the AutoYaST configuration from your SUSE Manager, and
+ proceeds to install SUSE Linux Enterprise Server according to the AutoYaST profile you have
+ created.
+
+
+
+
+ To use the CD-ROM, boot the system and type autoyast
+ at the prompt (assuming you left the label for the AutoYaST boot as
+ autoyast). When you press Enter, the
+ AutoYaST installation begins.
+
+
+ For more information about image creation, refer to KIWI at
+ .
+
+
+
+
+ Integrating AutoYaST with PXE
+
+
+
+ In addition to CD-ROM-based installations, AutoYaST installation through
+ a Pre-Boot Execution Environment (PXE) is supported. This is less
+ error-prone than CDs, enables AutoYaST installation from bare metal, and
+ integrates with existing PXE/DHCP environments.
+
+
+ To use this method, make sure your systems have network interface cards
+ (NIC) that support PXE, install and configure a PXE server, ensure DHCP
+ is running, and place the installation repository on an HTTP server for
+ deployment. Finally upload the AutoYaST profile via the Web interface to
+ the SUSE Manager server. Once the AutoYaST profile has been created, use
+ the URL from the Autoinstallation Overview page, as
+ for CD-ROM-based installations.
+
+
+ To obtain specific instructions for conducting PXE AutoYaST installation,
+ refer to the Using PXE Boot section of the
+ SUSE Linux Enterprise Deployment Guide.
+
+
+ Starting with , the AutoYaST
+ options available from
+ SystemsKickstart
+ are described.
+
+
+
+
+
+
+
+
+ Introduction to Kickstart
+
+
+
+ Using Kickstart, a system administrator can create a single file
+ containing the answers to all the questions that would normally be asked
+ during a typical installation of Red Hat Enterprise Linux.
+
+
+ Kickstart files can be kept on a single server and read by individual
+ computers during the installation. This method allows you to use one
+ Kickstart file to install Red Hat Enterprise Linux on multiple machines.
+
+
+ The Red Hat Enterprise Linux System Administration
+ Guide contains an in-depth description of Kickstart
+ ().
+
+
+
+ Kickstart Explained
+ Kickstart
+ explained
+
+ When a machine is to receive a network-based Kickstart, the
+ following events must occur in this order:
+
+
+
+
+ After being connected to the network and turned on, the machine's PXE
+ logic broadcasts its MAC address and requests to be discovered.
+
+
+
+
+ If no static IP address is used, the DHCP server recognizes the
+ discovery request and offers network information needed for the new
+ machine to boot. This information includes an IP address, the default
+ gateway to be used, the netmask of the network, the IP address of the
+ TFTP or HTTP server holding the bootloader program, and the full path
+ and file name of that program (relative to the server's root).
+
+
+
+
+ The machine applies the networking information and initiates a session
+ with the server to request the bootloader program.
+
+
+
+
+ The bootloader searches for its configuration file on the server from
+ which it was loaded. This file dictates which kernel and kernel
+ options, such as the initial RAM disk (initrd) image, should be
+ executed on the booting machine. Assuming the bootloader program is
+ SYSLINUX, this file is located in the
+ pxelinux.cfg directory on the server and named
+ the hexadecimal equivalent of the new machine's IP address. For
+ example, a bootloader configuration file for Red Hat Enterprise Linux
+ AS 2.1 should contain:
+
+port 0
+prompt 0
+timeout 1
+default My_Label
+label My_Label
+ kernel vmlinuz
+ append ks=http://my_susemanager_server/path \
+ initrd=initrd.img network apic
+
+
+
+ The machine accepts and uncompresses the init image and kernel, boots
+ the kernel, and initiates a Kickstart installation with the options
+ supplied in the bootloader configuration file, including the server
+ containing the Kickstart configuration file.
+
+
+
+
+ This Kickstart configuration file in turn directs the machine to
+ the location of the installation files.
+
+
+
+
+ The new machine is built based on the parameters established within
+ the Kickstart configuration file.
+
+
+
+
+
+ Kickstart Prerequisites
+
+ Some preparation is required for your infrastructure to handle
+ Kickstarts. For instance, before creating Kickstart profiles, you
+ may consider:
+
+
+
+
+ A DHCP server is not required for kickstarting, but it can make things
+ easier. If you are using static IP addresses, select static IP while
+ developing your Kickstart profile.
+
+
+
+
+ An FTP server can be used instead of hosting the Kickstart
+ distribution trees via HTTP.
+
+
+
+
+ If conducting a bare metal Kickstart, you should configure DHCP
+ to assign required networking parameters and the bootloader program
+ location. Also, specify within the bootloader configuration file the
+ kernel to be used and appropriate kernel options.
+
+
+
+
+
+ Building Bootable Kickstart ISOs
+
+ While you can schedule a registered system to be kickstarted to a new
+ operating system and package profile, you can also Kickstart a
+ system that is not registered with SUSE Manager or does not yet have an
+ operating system installed. One common method of doing this is to create
+ a bootable CD-ROM that is inserted into the target system. When the
+ system is rebooted, it boots from the CD-ROM, loads the Kickstart
+ configuration from your SUSE Manager, and proceeds to install Red Hat Enterprise Linux
+ according to the Kickstart profile you have created.
+
+
+ To do this, copy the contents of /isolinux from the
+ first CD-ROM of the target distribution. Then edit the
+ isolinux.cfg file to default to 'ks'. Change the
+ 'ks' section to the following template:
+
+label ks
+kernel vmlinuz
+ append text ks=url initrd=initrd.img lang= devfs=nomount \
+ ramdisk_size=16438 ksdevice
+
+ IP address-based Kickstart URLs will look like this:
+
+http://my.manager.server/kickstart/ks/mode/ip_range
+
+
+ The Kickstart distribution defined via the IP range should match
+ the distribution from which you are building, or errors will occur.
+ ksdevice is optional, but looks like:
+
+ksdevice=eth0
+
+
+ It is possible to change the distribution for a Kickstart profile within
+ a family, such as Red Hat Enterprise Linux AS 4 to Red Hat Enterprise
+ Linux ES 4, by specifying the new distribution label. Note that you
+ cannot move between versions (4 to 5) or between updates (U1 to U2).
+
+
+ Next, customize isolinux.cfg further for your needs
+ by adding multiple Kickstart options, different boot messages, shorter
+ timeout periods, etc.
+
+
+
+ Next, create the ISO as described in the Making an
+ Installation Boot CD-ROM section of the Red Hat
+ Enterprise Linux Installation Guide. Alternatively, issue
+ the command:
+
+mkisofs -o file.iso -b isolinux.bin -c boot.cat -no-emul-boot \
+ -boot-load-size 4 -boot-info-table -R -J -v -T isolinux/
+
+ Note that isolinux/ is the relative path to the
+ directory containing the modified isolinux files copied from the
+ distribution CD, while file.iso is the output ISO
+ file, which is placed into the current directory.
+
+
+ Burn the ISO to CD-ROM and insert the disc. Boot the system and type
+ "ks" at the prompt (assuming you left the label for the
+ Kickstart boot as 'ks'). When you press Enter,
+ Kickstart starts running.
+
+
+
+ Integrating Kickstart with PXE
+
+ In addition to CD-ROM-based installs, Kickstart supports a Pre-Boot
+ Execution Environment (PXE). This is less error-prone than CDs, enables
+ kickstarting from bare metal, and integrates with existing PXE/DHCP
+ environments.
+
+
+ To use this method, make sure your systems have network interface cards
+ (NIC) that support PXE. Install and configure a PXE server and ensure
+ DHCP is running. Then place the appropriate files on an HTTP server for
+ deployment. Once the Kickstart profile has been created, use the
+ URL from the Kickstart Details page, as for
+ CD-ROM-based installs.
+
+
+
+ To obtain specific instructions for conducting PXE Kickstarts,
+ refer to the PXE Network Installations chapter of
+ the Red Hat Enterprise Linux 4 System Administration
+ Guide.
+
+
+
+ When running the Network Booting Tool, as described in the Red Hat
+ Enterprise Linux 4: System Administration Guide, select
+ "HTTP" as the protocol and include the domain name of
+ the SUSE Manager in the Server field if you intend to use it to
+ distribute the installation files.
+
+
+
+ The following sections describe the autoinstallation options available
+ from the
+ SystemsAutoinstallation
+ page.
+
+
+
+
+
+
+
+
+ Autoinstallation > Profiles (Kickstart and AutoYaST)
+
+
+
+
+ This page lists all profiles for your organization, shows whether these
+ profiles are active, and specifies the distribution tree with which each
+ profile is associated. You can either create a new Kickstart profile by
+ clicking the Create Kickstart Profile link, upload
+ or paste the contents of a new profile using the Upload
+ Kickstart/Autoyast File link, or edit an existing Kickstart profile
+ by clicking the name of the profile. Note that you can only update AutoYaST
+ profiles using the upload button. You can also view AutoYaST profiles in
+ the edit box or change the virtualization type using the selection list.
+
+
+
+
+
+
+
+ Create a New Kickstart Profile
+
+ Click on the Create Kickstart Profile link from
+ the
+ SystemsAutoinstallation
+ page to start the wizard that populates the base values needed for a
+ Kickstart profile.
+
+
+ Creating a Kickstart Profile
+
+
+ On the first line, enter a Kickstart profile label. This label cannot
+ contain spaces, so use dashes (-) or underscores (_) as separators.
+
+
+
+
+ Select a Base Channel for this profile, which
+ consists of packages based on a specific architecture and Red Hat
+ Enterprise Linux release.
+
+
+
+
+ Select an Autoinstallable Tree for this profile. The
+ Autoinstallable Tree drop-down menu is only
+ populated if one or more distributions have been created for the
+ selected base channel (see ).
+
+
+
+
+
+ Instead of selecting a specific tree, you can also check the box
+ Always use the newest Tree for this base channel.
+ This setting lets SUSE Manager automatically pick the latest tree that
+ is associated with the specified base channels. If you add new trees
+ later, SUSE Manager will always use the most recently created or
+ modified tree.
+
+
+
+
+ Select the Virtualization Type from the drop-down
+ menu.
+
+
+
+
+
+ On the second page, select (or enter) the location of the Kickstart
+ tree.
+
+
+
+
+ On the third page, select a root password for the system.
+
+
+
+
+ Depending on your base channel, your newly created Kickstart profile
+ might be subscribed to a channel that is missing required packages. For
+ Kickstart to work properly, the following packages should be present
+ in its base channel: pyOpenSSL,
+ rhnlib, libxml2-python, and
+ spacewalk-koan, along with their associated packages.
+
+
+ To resolve this issue:
+
+
+
+
+ Make sure that the Tools software channel for the Kickstart profile's
+ base channel is available to your organization. If it is not, you must
+ request entitlements for the Tools software channel from the
+ SUSE Manager administrator.
+
+
+
+
+ Make sure that the Tools software channel for this Kickstart profile's
+ base channel is available to your SUSE Manager as a child channel.
+
+
+
+
+ Make sure that rhn-kickstart and associated
+ packages corresponding to this Kickstart are available in the
+ Tools child channel.
+
+
+
+
+ The final stage of the wizard presents the
+ Autoinstallation
+ DetailsDetails tab. On this tab
+ and the other subtabs, nearly every option for the new Kickstart profile
+ can be customized.
+
+
+ Once created, you can access the Kickstart profile by downloading it from
+ the Autoinstallation Details page by clicking the
+ Autoinstallation File subtab and clicking the
+ Download Autoinstallation File link.
+
+
+ If the Kickstart file is not managed by
+ SUSE Manager, you can access it via the following URL:
+
+http://my.manager.server/ks/dist/ks-rhel-ARCH-VARIANT-VERSION
+
+ In the above example, ARCH is the architecture
+ of the Kickstart file, VARIANT is either
+ client or server, and
+ VERSION is the release of Red Hat Enterprise Linux associated
+ with the Kickstart file.
+
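+ For example (architecture, variant, and version are placeholders to be
+ replaced with your values):
+
+http://my.manager.server/ks/dist/ks-rhel-x86_64-server-7
+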
+
+ The following sections describe the options available on each subtab.
+
+
+
+ Autoinstallation Details
+
+ >
+
+ Details
+
+
+
+
+ shows the subtabs that
+ are available. On the Autoinstallation
+ DetailsDetails page, you have
+ the following options:
+
+
+
+
+ Change the profile Label.
+
+
+
+
+ Change the operating system by clicking on
+ (Change).
+
+
+
+
+ Change the Virtualization Type.
+
+
+
+ Changing the Virtualization Type may require
+ changes to the Kickstart profile bootloader and partition options,
+ potentially overwriting user customizations. Consult the
+ Partitioning tab to verify any new or changed
+ settings.
+
+
+
+
+
+ Change the amount of Virtual Memory (in Megabytes
+ of RAM) allocated to virtual guests autoinstalled with this profile.
+
+
+
+
+ Change the number of Virtual CPUs for each virtual
+ guest.
+
+
+
+
+ Change the Virtual Storage Path from the default in
+ /var/lib/xen/.
+
+
+
+
+ Change the amount of Virtual Disk Space (in GB)
+ allotted to each virtual guest.
+
+
+
+
+ Change the Virtual Bridge for networking of the
+ virtual guest.
+
+
+
+
+ Deactivate the profile so that it cannot be used to schedule a
+ Kickstart by removing the Active check mark.
+
+
+
+
+ Check whether to enable logging for custom
+ scripts to the /root/ks-post.log file.
+
+
+
+
+ Decide whether to enable logging for custom
+ scripts to the /root/ks-pre.log file.
+
+
+
+
+ Choose whether to preserve the ks.cfg file and
+ all %include fragments to the
+ /root/ directory of all systems autoinstalled
+ with this profile.
+
+
+
+
+ Select whether this profile is the default for all of your
+ organization's Kickstarts by checking or unchecking the box.
+
+
+
+
+ Add any Kernel Options in the corresponding text
+ box.
+
+
+
+
+ Add any Post Kernel Options in the corresponding
+ text box.
+
+
+
+
+ Enter comments that are useful to you in distinguishing this profile
+ from others.
+
+
+
+
+
+
+ Autoinstallation Details
+
+ >
+
+ Operating System
+
+
+
+ On this page, you can make the following changes to the operating system
+ that the Kickstart profile installs:
+
+
+
+ Change the base channel
+
+
+ Select from the available base channels. SUSE Manager administrators
+ see a list of all base channels that are currently synced to the
+ SUSE Manager.
+
+
+
+
+ Child Channels
+
+
+ Subscribe to available child channels of the base channel, such as
+ the Tools channel.
+
+
+
+
+ Available Trees
+
+
+ Use the drop-down menu to choose from available trees associated with
+ the base channel.
+
+
+
+
+ Always use the newest Tree for this base channel.
+
+
+ Instead of selecting a specific tree, you can also check the box
+ Always use the newest Tree for this base channel.
+ This setting lets SUSE Manager automatically pick the latest tree
+ that is associated with the specified base channels. If you add new
+ trees later, SUSE Manager will always use the most recently created
+ or modified tree.
+
+
+
+
+ Software URL (File Location)
+
+
+ The exact location from which the Kickstart tree is mounted. This
+ value is determined when the profile is created. You can view it on
+ this page but you cannot change it.
+
+
+
+
+
+
+
+ Autoinstallation Details
+
+ >
+
+ Variables
+
+
+ Autoinstallation variables can substitute values in Kickstart and
+ AutoYaST profiles. To define a variable, create a name-value pair
+ (name/value) in the text box.
+
+
+ For example, if you want to autoinstall a system that joins the network
+ of a specified organization (for example the Engineering department),
+ you can create profile variables to set the IP address and the gateway
+ server address that any system using that profile will
+ use. Add the following lines to the Variables text
+ box.
+
+IPADDR=192.168.0.28
+GATEWAY=192.168.0.1
+
+ Now you can use the name of the variable in the profile instead of a
+ specific value. For example, the network part of a
+ Kickstart file looks like the following:
+
+network --bootproto=static --device=eth0 --onboot=on --ip=$IPADDR \
+ --gateway=$GATEWAY
+
+ The $IPADDR variable will be resolved to
+ 192.168.0.28, and $GATEWAY to
+ 192.168.0.1.
+
+
+
+ There is a hierarchy when creating and using variables in Kickstart
+ files. System Kickstart variables take precedence over
+ Profile variables, which in turn take precedence
+ over Distribution variables. Understanding this
+ hierarchy can alleviate confusion when using variables in
+ Kickstarts.
+
+
+
+ Using variables is just one part of the larger Cobbler infrastructure
+ for creating templates that can be shared between multiple profiles and
+ systems.
+
+
+
+
+
+ Autoinstallation Details
+
+ >
+
+ Advanced Options
+
+
+
+ From this page, you can toggle several installation options on and off
+ by checking and unchecking the boxes to the left of the option. For most
+ installations, the default options are correct. Refer to Red Hat
+ Enterprise Linux documentation for details.
+
+
+
+ Assigning Default Profiles to an Organization
+
+ You can specify an Organization Default Profile by clicking on
+ Autoinstallation
+ Profiles profile name
+ Details, then checking the
+ Organization Default Profile box and finally clicking
+ on Update.
+
+
+
+ Assigning IP Ranges to Profiles
+
+ You can associate an IP range to an autoinstallation profile by clicking
+ on Autoinstallation
+ Profiles profile name
+ Bare Metal Autoinstallation, adding an
+ IPv4 range and finally clicking on Add IP Range.
+
+
+
+
+ Autoinstallation Details
+
+ >
+
+ Bare Metal Autoinstallation
+
+
+
+ This subtab provides the information necessary to Kickstart systems that
+ are not currently registered with SUSE Manager. Using the on-screen
+ instructions, you may either autoinstall systems using boot media
+ (CD-ROM) or by IP address.
+
+
+
+
+ System Details
+
+ >
+
+ Details
+
+
+
+
+ shows the subtabs that are
+ available from the System Details tab.
+
+
+ On the System
+ DetailsDetails page, you have
+ the following options:
+
+
+
+
+ Select between DHCP and static IP, depending on your network.
+
+
+
+
+ Choose the level of SELinux that is configured on kickstarted systems.
+
+
+
+
+ Enable configuration management or remote command execution on
+ kickstarted systems.
+
+
+
+
+ Change the root password associated with this profile.
+
+
+
+
+
+
+ System Details
+
+ >
+
+ Locale
+
+
+
+ Change the timezone for kickstarted systems.
+
+
+
+
+ System Details
+
+ >
+
+ Partitioning
+
+
+
+ From this subtab, indicate the partitions that you wish to create during
+ installation. For example:
+
+partition /boot --fstype=ext3 --size=200
+partition swap --size=2000
+partition pv.01 --size=1000 --grow
+volgroup myvg pv.01
+logvol / --vgname=myvg --name=rootvol --size=1000 --grow
+
+
+
+ System Details
+
+ >
+
+ File Preservation
+
+
+
+ If you have previously created a file preservation list, include this
+ list as part of the Kickstart. This will protect the listed files
+ from being over-written during the installation process. Refer to
+ for information on how to
+ create a file preservation list.
+
+
+
+
+ System Details
+
+ >
+
+ GPG & SSL
+
+
+
+ From this subtab, select the GPG keys and/or SSL certificates to be
+ exported to the kickstarted system during the %post section of the
+ Kickstart. For SUSE Manager customers, this list includes the SSL
+ Certificate used during the installation of SUSE Manager.
+
+
+
+ Any GPG key you wish to export to the kickstarted system must be in
+ ASCII rather than binary format.
+
+
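+ For example, an ASCII-armored copy of a key can be produced with a
+ command like the following (the key ID and output file name are
+ placeholders):
+
+gpg --export --armor 0123456789ABCDEF > mykey.asc
+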
+
+
+
+ System Details
+
+ >
+
+ Troubleshooting
+
+
+
+ From this subtab, change information that may help with troubleshooting
+ hardware problems:
+
+
+
+ Bootloader
+
+
+ For some headless systems, it is better to select the non-graphic
+ LILO bootloader.
+
+
+
+
+ Kernel Parameters
+
+
+ Enter kernel parameters here that may help to narrow down the source
+ of hardware issues.
+
+
+
+
+
+
+
+
+ Software
+
+ >
+
+ Package Groups
+
+
+
+
+ shows the subtabs that are
+ available from the Software tab.
+
+
+ Enter the package groups, such as @office or
+ @admin-tools, that you would like to install on the
+ kickstarted system in the large text box. If you would like to know what
+ package groups are available, and what packages they contain, refer to
+ the RedHat/base/ file of your Kickstart tree.
+
+
+
+
+
+ Software
+
+ >
+
+ Package Profiles
+
+
+
+ If you have previously created a Package Profile from one of your
+ registered systems, you can use that profile as a template for the
+ files to be installed on a kickstarted system. Refer to for more information about
+ package profiles.
+
+
+
+ Activation Keys
+
+
+ The Activation Keys tab allows you to select
+ Activation Keys to include as part of the Kickstart profile. These keys,
+ which must be created before the Kickstart profile, will be used when
+ re-registering kickstarted systems.
+
+
+
+ Scripts
+
+
+ The Scripts tab is where %pre and %post scripts are
+ created. This page lists any scripts that have already been created for
+ this Kickstart profile. To create a new Kickstart script, perform the
+ following procedure:
+
+
+
+
+ Click the add new kickstart script link in the
+ upper right corner.
+
+
+
+
+
+ Enter the path to the scripting language used to create the script,
+ such as /usr/bin/perl.
+
+
+
+
+ Enter the full script in the large text box.
+
+
+
+
+ Indicate whether this script is to be executed in the %pre or %post
+ section of the Kickstart process.
+
+
+
+
+ Indicate whether this script is to run outside of the chroot
+ environment. Refer to the Post-installation
+ Script section of the Red Hat Enterprise Linux
+ System Administration Guide for further explanation of the
+ option.
+
+
+
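+ As a minimal sketch of such a script (the marker file is an assumption),
+ a %post script entered with /bin/bash as the scripting language could
+ simply record when the installation finished:
+
+# Hypothetical %post script body: leave a marker for troubleshooting.
+date +"Kickstart completed on %F at %T" >> /root/ks-post-marker
+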
+
+
+ SUSE Manager supports the inclusion of separate files within the
+ Partition Details section of the Kickstart profile. For instance, you
+ may dynamically generate a partition file based on the machine type and
+ number of disks at Kickstart time. This file can be created via a
+ %pre script and placed on the system, such as
+ /tmp/part-include. Then you can call for that file
+ by entering the following line in the Partition Details field of the
+ System
+ DetailsPartitioning tab:
+
+%include /tmp/part-include
+
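+ A minimal %pre sketch that generates such a fragment could look like
+ this; the disk detection, file system types, and sizes are assumptions:
+
+# Hypothetical %pre script body: pick the first disk and write a
+# partitioning fragment that the profile pulls in via %include.
+DISK=$(ls /sys/block | grep -E '^(sd|vd)' | head -n 1)
+cat > /tmp/part-include <<EOF
+partition /boot --fstype=ext3 --size=200 --ondisk=$DISK
+partition swap --size=2000 --ondisk=$DISK
+partition / --fstype=ext3 --size=1000 --grow --ondisk=$DISK
+EOF
+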
+
+
+
+ Autoinstallation File
+
+
+ The Autoinstallation File tab allows you to view or
+ download the profile that has been generated from the options chosen in
+ the previous tabs.
+
+
+
+
+
+
+
+ Upload Kickstart/AutoYaST File
+
+ Click on the Upload Kickstart/Autoyast File link
+ from the
+ SystemsAutoinstallation
+ page to upload an externally prepared AutoYaST or Kickstart profile.
+
+
+
+
+ In the first line, enter a profile Label for the
+ automated installation. This label cannot contain spaces, so use dashes
+ (-) or underscores (_) as separators.
+
+
+
+
+ Select an Autoinstallable Tree for this profile. The
+ Autoinstallable Tree drop-down menu is only
+ populated if one or more distributions have been created for the
+ selected base channel (see ).
+
+
+
+
+ Instead of selecting a specific tree, you can also check the box
+ Always use the newest Tree for this base channel.
+ This setting lets SUSE Manager automatically pick the latest tree that
+ is associated with the specified base channels. If you add new trees
+ later, SUSE Manager will always use the most recently created or
+ modified tree.
+
+
+
+
+ Select the Virtualization Type from the drop-down
+ menu.
+
+
+
+ If you do not intend to use the autoinstall profile to create virtual
+ guest systems, you can leave the drop-down set to the default choice
+ KVM Virtualized Guest.
+
+
+
+
+
+ Finally, either provide the file contents with cut-and-paste or upload
+ the file from a local storage medium:
+
+
+
+
+ Paste it into the File Contents box and click
+ Create, or
+
+
+
+
+ enter the file name in the File to Upload field
+ and click Upload File.
+
+
+
+
+
+
+ Once done, four subtabs are available: Details
+ (see ), Bare
+ Metal (see ), Variables
+ (see ), and
+ Autoinstallable File (see ).
+
+
+
+
+
+
+ Autoinstallation
+
+ >
+
+ Bare Metal
+
+
+
+ Lists the IP addresses that have been associated with the profiles
+ created by your organization. Click either the range or the profile name
+ to access different tabs of the Autoinstallation
+ Details page.
+
+
+
+
+
+ Autoinstallation
+
+ >
+
+ GPG and SSL Keys
+
+
+
+ Lists keys and certificates available for inclusion in Kickstart
+ profiles and provides a means to create new ones. This is especially
+ important for customers of SUSE Manager or the Proxy Server because
+ systems kickstarted by them must have the server key imported into
+ SUSE Manager and associated with the relevant Kickstart profiles.
+ Import it by creating a new key here and then make the profile
+ association in the GPG and SSL keys subtab of the
+ Autoinstallation Details page.
+
+
+ To create a new key or certificate, click the Create Stored
+ Key/Cert link in the upper-right corner of the page. Enter a
+ description, select the type, upload the file, and click the
+ Update Key button. Note that a unique description is
+ required.
+
+
+
+ The GPG key you upload to SUSE Manager must be in ASCII format. Using a
+ GPG key in binary format causes anaconda, and therefore the
+ Kickstart process, to fail.
+
+
+
+
+
+
+
+
+ Autoinstallation
+
+ >
+
+ Distributions
+
+
+
+ The Distributions page enables you to find and create
+ custom installation trees that may be used for automated installations.
+
+
+
+ The Distributions page does not display distributions
+ already provided. They can be found within the
+ Distribution drop-down menu of the
+ Autoinstallation Details page.
+
+
+
+ Before creating a distribution, you must make the installation data
+ available, as described in the Automated
+ Installation chapter of the SUSE Linux Enterprise
+ Deployment Guide (section Simple Mass
+ Installation, Providing the Installation Data)
+ or, respectively, the Kickstart Installations
+ chapter of the Red Hat Enterprise Linux System Administration
+ Guide. This tree must be located in a local directory on the
+ SUSE Manager server.
+
+
+
+ Creating a Distribution for Autoinstallation
+
+
+ To create a distribution, on the Autoinstallable
+ Distributions page click Create
+ Distribution in the upper right corner.
+
+
+
+
+ On the Create Autoinstallable Distribution page,
+ provide the following data:
+
+
+
+
+ Enter a label (without spaces) in the Distribution
+ Label field, such as my-orgs-sles-12-sp1
+ or my-orgs-rhel-as-7.
+
+
+
+
+ In the Tree Path field, paste the path to the base
+ of the installation tree.
+
+ For Red Hat Enterprise Linux systems, you can test this by appending
+ "images/pxeboot/README" to the URL in a Web browser,
+ pressing Enter, and ensuring that the readme file
+ appears.
+
+
+
+
+ Select the matching distribution from the Base
+ Channel and Installer Generation
+ drop-down menus, such as SUSE Linux for SUSE Linux Enterprise,
+ or Red Hat Enterprise Linux 7 for Red Hat Enterprise Linux 7
+ client systems.
+
+
+
+
+
+
+ When finished, click the Create Autoinstallable
+ Distribution button.
+
+
+
+
+
+
+ Autoinstallation
+
+ >
+
+ Distributions
+
+ >
+
+ Variables
+
+
+
+ Autoinstallation variables can be used to substitute values into
+ Kickstart and AutoYaST profiles. To define a variable, create a
+ name-value pair (name/value) in the text box.
+
+
+ For example, if you want to autoinstall a system that joins the network
+ of a specified organization (for example the Engineering department), you
+ can create profile variables to set the IP address and the gateway
+ server address that any system using that profile will
+ use. Add the following lines to the Variables text
+ box.
+
+IPADDR=192.168.0.28
+GATEWAY=192.168.0.1
+
+ To use the distribution variable, use the name of the variable in the
+ profile to substitute the value. For example, the network
+ part of a Kickstart file looks like the
+ following:
+
+network --bootproto=static --device=eth0 --onboot=on --ip=$IPADDR \
+ --gateway=$GATEWAY
+
+ The $IPADDR variable will be resolved to
+ 192.168.0.28, and $GATEWAY to
+ 192.168.0.1.
+
+
+
+ There is a hierarchy when creating and using variables in Kickstart
+ files. System Kickstart variables take precedence over Profile
+ variables, which in turn take precedence over Distribution variables.
+ Understanding this hierarchy can alleviate confusion when using
+ variables in Kickstarts.
+
+
+
+ In AutoYaST profiles you can use such variables as well.
+
+
+ Using variables is just one part of the larger Cobbler infrastructure
+ for creating templates that can be shared between multiple profiles and
+ systems.
+
+
+
+
+
+
+
+ Autoinstallation
+
+ >
+
+ File Preservation
+
+
+
+ Collects lists of files to be protected and re-deployed on systems during
+ Kickstart. For instance, if you have many custom configuration files
+ located on a system to be kickstarted, enter them here as a list and
+ associate that list with the Kickstart profile to be used.
+
+
+ To use this feature, click the Create File Preservation
+ List link at the top. Enter a suitable label and all files and
+ directories to be preserved. Enter absolute paths to all files and
+ directories. Then click Create List.
+
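+ For example (the paths are placeholders), a list might contain entries
+ such as:
+
+/etc/ssh/sshd_config
+/etc/sysconfig/network/ifcfg-eth0
+/opt/myapp/conf
+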
+
+
+ Although file preservation is useful, it does have limitations. Each
+ list is limited to a total size of 1 MB. Special devices like
+ /dev/hda1 and /dev/sda1 are
+ not supported. Only file and directory names may be entered. No regular
+ expression wildcards can be used.
+
+
+
+ When finished, you may include the file preservation list in the
+ Kickstart profile to be used on systems containing those files. Refer
+ to for
+ precise steps.
+
+
+
+
+
+
+
+
+ Autoinstallation
+
+ >
+
+ Autoinstallation Snippets
+
+
+
+ Use snippets to store common blocks of code that can be shared across
+ multiple Kickstart or AutoYaST profiles in SUSE Manager.
+
+
+
+ Autoinstallation
+
+ >
+
+ Autoinstallation Snippets
+
+ >
+
+ Default Snippets
+
+
+ Default snippets coming with SUSE Manager are not editable. You can use
+ a snippet by adding the Snippet Macro statement,
+ such as $SNIPPET('spacewalk/sles_register_script'), to
+ your autoinstallation profile. This is an AutoYaST profile example:
+
+<init-scripts config:type="list">
+ $SNIPPET('spacewalk/sles_register_script')
+</init-scripts>
+
+ When you create a snippet with the Create Snippet
+ link, all profiles including that snippet will be updated accordingly.
+
+
+
+
+ Autoinstallation
+
+ >
+
+ Autoinstallation Snippets
+
+ >
+
+ Custom Snippets
+
+
+ This tab lists your custom snippets. Click the name of a snippet to view,
+ edit, or delete it.
+
+
+
+
+ Autoinstallation
+
+ >
+
+ Autoinstallation Snippets
+
+ >
+
+ All Snippets
+
+
+ The All Snippets tab lists default and custom
+ snippets together.
+
+
+
+
+
+
+ Software Crashes
+
+
+
+
+ Virtual Host Managers
+
+
+
+
+
+ Salt
+
+
+
+ Web Interface
+ Salt
+ If you click the Salt tab on the top navigation bar, by default the
+ Salt
+ Onboarding
+ view appears. On the left sidebar you can select Remote
+ Commands to execute remote commands on your Salt Minions. You may also define
+ a States Catalog for creating a collection of Salt system states.
+
+
+ Onboarding
+ The Onboarding page provides a summary of your minions, including
+ their names, fingerprints, current state, and actions you may perform on them.
+
+ Once you have pointed a minion to the SUSE Manager server as its master within
+ /etc/salt/minion, you can choose to accept or reject a
+ minion from this page.
+
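+ A minimal sketch of that setting (the host name is a placeholder for
+ your SUSE Manager server):
+
+# /etc/salt/minion (or a file below /etc/salt/minion.d/)
+master: manager.example.com
+
+ After changing the file, restart the salt-minion service so the minion
+ presents its key to the server.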
+
+
+
+
+
+ Bootstrapping Salt Minions
+ The Salt > Bootstrapping page allows you to
+ bootstrap Salt minions from the WebUI.
+
+
+
+ Bootstrapping Parameters
+
+ Host
+
+ Enter the FQDN of the minion to be bootstrapped in this field.
+
+
+
+ SSH Port
+
+ Enter the SSH port that will be used to connect to and bootstrap the machine.
+ The default is 22.
+
+
+
+ User
+
+ Enter the minion's user login. The default is
+ root.
+
+
+
+
+ Password
+
+ Enter the minion's login password.
+
+
+
+ Activation Key
+
+ Select the activation key (associated with a software source channel) that
+ the minion should use to bootstrap with.
+
+
+
+
+ Disable Strict Key Host Checking
+
+ This checkbox is selected by default. It allows the script to
+ auto-accept host keys without requiring a user to manually
+ authenticate.
+
+
+
+
+ Manage System Completely via SSH (Will not install an Agent)
+
+ If selected, the system will automatically be configured to use SSH. No other
+ connection method will be configured.
+
+
+
+
+ Once your minion's connection details have been filled in, click the +
+ Bootstrap it button. Once the minion has completed the bootstrap process,
+ you can find your new minion under the Systems tab.
+
+
+
+ Remote Commands
+ The remote commands page allows you to execute commands from the SUSE Manager
+ server on minions.
+
+ Remote Commands Security
+ All commands run from the Remote Commands page are executed as
+ root on minions. Because you may use wildcards to run commands across any number of
+ systems, always take extra precaution, as this may have drastic consequences
+ for your systems.
+
+
+ On the Remote Commands page, located under
+ Salt
+ Remote Commands
+ you will see two input fields. The first field is for entering commands.
+ The second field is for targeting minions by name, by group, or by using
+ wildcards.
+
+ Enter the command you wish to execute, then enter the minion, group or wildcard you
+ wish to execute the command on. Click the Preview button to see which
+ machines will be targeted. Click the Run button to execute a command
+ on the previewed systems.
+
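+
+ For example (the values are purely illustrative), you could run
+ uptime on all minions whose names start with
+ web by filling in the fields like this:
+
+Command: uptime
+Target:  web*
+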
+ Previewing Target Systems
+ The Preview button should always be selected before the
+ Run button. Systems you wish to run a command on must be
+ available as a target.
+
+
+
+
+
+
+ States Catalog
+ Selecting
+ Salt
+ States Catalog
+ presents you with the States Catalog overview. You can
+ use this page to manage custom Salt states. Salt states created on this page may be
+ applied to organizations, groups and single systems.
+
+
+ Click the + Create State button to open the Create
+ State page. On this page you can define custom states which will be stored
+ within the State Catalog. For example, to create a custom state for
+ installation of the apache webserver you would provide a name:
+ Webserver and the state content:
+
+apache:
+  pkg.installed: []
+  service.running:
+    - require:
+      - pkg: apache
+
+ When you have finished entering your custom state definition click the Create
+ State button. This will save the state and allow use of this specific
+ state at the minion, group and organization level.
+
+ To add this state at the individual minion level perform the following actions:
+
+ Using a State with a Minion
+
+ From the Systems overview page, select a salt managed
+ minion. You will be taken to the System Details page.
+
+
+ Select
+ States
+ Custom
+ from the navigation tabs.
+
+
+ Click the search button to look at an overview of all available states, or
+ enter a custom state name to find it within the States
+ Catalog.
+
+
+ Select the Assign checkbox to assign the state to this
+ minion.
+
+
+ Click the Save button to save this assignment to the
+ database. Then click the Apply button to finalize application
+ of the state or states which you have selected.
+
+
+
+ The Save Button
+ The Save button saves your changes but does not apply the
+ state. If you leave the page after clicking the Save button, your state will be saved
+ to the database but not yet applied. You must apply states to minions, groups,
+ or organizations for the states to take effect.
+
+
+
+
+ Patches
+
+
+
+ Patches
+
+
+ WebLogic
+ Patches
+
+
+ Select the Patches tab from the top navigation bar to
+ track the availability and application of patches to your managed systems.
+
+
+ The Patches Overview page displays patches that are relevant
+ to at least one of your managed systems and have not been applied yet.
+
+
+ Receiving Patches for Your System
+
+ To receive an email when patches are issued for your system, go to
+ OverviewYour
+ Preferences and select Receive email
+ notifications.
+
+
+
+ SUSE distinguishes three types of patches: security updates, bug fix
+ updates, and enhancement updates. Each patch consists of a summary of
+ the problem and its solution, including the RPM packages that fix the problem.
+
+
+ Icons are used to identify the three types:
+
+
+
+
+
+ Security Alert
+
+
+
+
+
+
+
+
+ — Security Updates available,
+ strongly recommended
+
+
+
+
+
+ Bug Fix Alert
+
+
+
+
+
+
+
+
+ — Bug Fix Updates available, recommended
+
+
+
+
+
+ Enhancement Alert
+
+
+
+
+
+
+
+ — Enhancement Updates available, optional
+
+
+
+
+ A summary of each patch is provided in list form displaying its type,
+ advisory ID, synopsis (with the severity as a textual prefix in case
+ of security updates, such as critical, important, moderate, or low),
+ number of affected systems in your network, and date updated.
+
+
+ In addition, you may view patches by product line at the following
+ location: .
+ For more information on security updates, see
+ .
+
+
+
+ Relevant Patches
+
+ Web UI
+ Relevant Patches
+
+
+ Web UI
+ viewing list of applicable patches
+
+
+ WebLogic
+ Relevant Patches
+
+
+
+ The Relevant patches page displays a customized list
+ of patches applying to your registered systems (see
+ ).
+
+
+
+
+
+
+ Clicking an Advisory ID of a patch takes you to the
+ Details page of the Patch Details
+ page. Clicking on the number of associated systems takes you to the
+ Affected Systems page of the Patch
+ Details page. Refer to
+ for more information.
+
+
+
+
+ All Patches
+
+
+ Web UI
+ All Patches
+
+
+ Web UI
+ viewing list of all patches
+
+
+ WebLogic
+ All Patches
+
+
+
+
+
+ The All Patches page displays a list of all
+ patches released by SUSE, regardless of whether they apply to your
+ registered systems or not. As on the Relevant
+ Patches page, clicking either Advisory
+ or the number of systems affected takes you to related tabs of the
+ Patch Details page. Refer to for more information.
+
+
+
+ Apply Patches
+ Web UI
+ applying patches
+
+ Patches include a list of updated packages.
+
+ To apply patches to a system, the system must be entitled.
+ toms 2011-02-07: How? Add link to section or procedure.
+
+
+ Apply all applicable patches to a system by clicking on
+ SystemsSystems
+ in the top and left navigation bars. Click on the name of an
+ entitled system. Then in the System Details page
+ click the
+ SoftwarePatches subtab. When the relevant patch list appears, click
+ Select All then Apply Patches
+ on the bottom right-hand corner of the page. Only patches that have not been
+ scheduled, that were scheduled but failed, or that were canceled are
+ listed. Pending updates are excluded.
+
+
+ In addition, users with appropriate roles can apply patches using
+ two other methods:
+
+
+
+
+ To apply a specific patch to one or more systems, locate it in the
+ patch list and click on the number of systems affected, which takes
+ you to the Affected Systems page of the
+ Patch Details page. Select the individual systems
+ to be updated and click the Apply Patches button.
+ Double-check the systems to be updated on the confirmation page, then
+ click the Confirm button.
+
+
+
+
+ To apply more than one patch to one or more systems, select the
+ systems from the Systems list. Click the
+ System Set Manager link in the left navigation
+ bar, then click the Systems tab. After ensuring
+ the appropriate systems are selected, click the
+ Patches tab, select the patches to apply, and
+ click the Apply Patches button. Schedule a date
+ and time for the patch to be applied. The default is the current
+ date. Click the Confirm button. You
+ can follow the progress of the patch application via the
+ Pending Actions list. Refer to for more details.
+
+
+
+
+
+
+ If you use scheduled package installation, the packages or patches
+ are installed via the SUSE Manager daemon
+ (rhnsd). You must enable the SUSE Manager daemon on
+ your systems. For more information about the SUSE Manager daemon, see
+ “The rhnsd (Default)” (Section “Contact Methods”, ↑Best Practices).
+
+
+
+ The following rules apply to patches:
+
+
+
+
+ Each package is a member of one or more channels. If a selected system
+ is not subscribed to a channel containing the package, the update will
+ not be installed on that system.
+
+
+
+
+ If a newer version of the package is already installed on the system,
+ the update will not be installed.
+
+
+
+
+ If an older version of the package is installed, the package will be
+ upgraded.
+
+
+
+
+
+
+ Patch Details
+ Web UI
+ viewing details
+
+ If you click on the advisory of a patch in the
+ Relevant or All pages, its
+ Patch Details page appears. This page is further
+ divided into the following tabs:
+
+
+
+ Patch Details > Details
+
+ This subtab displays the patch report issued by SUSE. It provides
+ a synopsis of the patch first (with the severity as a textual
+ prefix in case of security updates, such as critical, important,
+ moderate, or low), issue date, and
+ any update dates. This is followed by a description of the patch
+ and the steps required to resolve the issue.
+
+
+ Below the Affected Channels label, all channels that
+ contain the affected package are listed. Clicking on a channel name
+ displays the Packages subtab of the Channel
+ Details page for that channel. Refer to
+ for more information.
+
+
+
+ Security updates list the specific vulnerability as tracked by
+ . This information is listed
+ below the CVEs label.
+
+
+
+
+ OVAL is an open vulnerability and assessment language promoted by
+ Mitre, . Clicking on the link
+ below the Oval label downloads this information to
+ your system. More useful are the collected SUSE Linux
+ security updates on
+ .
+
+
+
+ Patch Details > Packages
+
+ This page provides links to each of the updated RPMs by channel.
+ Clicking on the name of a package displays its Package
+ Details page.
+
+
+
+ Patch Details > Affected Systems
+
+ This page lists systems affected by the patches. You can apply updates
+ here. (See .) Clicking on the name
+ of a system takes you to its System Details page.
+ Refer to for more information.
+
+
+ To determine whether an update has been scheduled, refer to the
+ Status column in the affected systems table.
+ Possible values are: N/A, Pending, Picked Up, Completed, and Failed.
+ This column identifies only the last action related to a patch. For
+ instance, if an action fails and you reschedule it, this column shows
+ the status of the patch as pending with no mention of the previous
+ failure. Clicking a status other than N/A takes you
+ to the Action Details page. This column corresponds
+ to one on the Patch tab of the System
+ Details page.
+
+
+
+
+
+
+
+
+ Advanced Search
+
+ Web UI
+
+ Advanced Search
+
+ Web UI
+
+ searching
+
+ WebLogic
+
+ Patch Search
+
+
+ The Patches Search page allows you to search through
+ patches by specific criteria.
+
+
+
+
+
+
+
+ All Fields — Search patches by synopsis,
+ description, topic, or solution.
+
+
+
+
+ Patch Advisory — The name or the label of the
+ patch.
+
+
+
+
+
+
+ Package Name — Search particular packages by name:
+
+kernel
+
+ Results will be grouped by advisory. For example, searching for
+ 'kernel' returns all package names containing the string
+ kernel, grouped by advisory.
+
+
+
+
+ CVE — The name assigned to the security advisory
+ by the Common Vulnerabilities and Exposures (CVE) project at
+ . For example:
+
+CVE-2006-4535
+
+
+
+
+ To filter patch search results, check or uncheck the boxes next to the
+ type of advisory:
+
+
+
+
+
+ Bug Fix Advisory — Patches that fix issues reported by users or
+ discovered during development or testing.
+
+
+
+
+ Security Advisory — Patches fixing a security issue found during
+ development, testing, or reported by users or a software security
+ clearing house. A security advisory usually has one or more CVE names
+ associated with each vulnerability found in each package.
+
+
+
+
+ Product Enhancement Advisory — Patches providing new features,
+ improving functionality, or enhancing performance of a package.
+
+
+
+
+
+
+
+ Manage Patches
+
+
+ Web UI
+
+ Managing Patches
+
+
+ Custom patches enable organizations to issue patch alerts for the
+ packages in their custom channels, schedule their deployment, and manage patches
+ across organizations.
+
+
+
+
+ If the organization is using both SUSE Manager and SUSE Manager Proxy
+ server, then manage patches only on the SUSE Manager server since the
+ proxy servers receive updates directly from it. Managing patches on
+ a proxy in this combined configuration risks putting your servers
+ out of sync.
+
+
+
+
+
+ Creating and Editing Patches
+ Web UI
+ Creating Patches
+ Web UI
+ Editing Patches
+
+ To create a custom patch alert, proceed as follows:
+
+
+
+
+ On the top navigation bar, click on Patches,
+ then select Manage Patches on the left
+ navigation bar. On the Patches Management page,
+ click Create Patch.
+
+
+
+
+ Enter a label for the patch in the Advisory field,
+ ideally following a naming convention adopted by your organization.
+ emap: Similar patch
+ beginnings for SUSE? "Note that this label cannot begin with the
+ letters "RH" (capitalized or not) to prevent confusion between
+ custom errata and those issued by Red Hat."
+
+
+
+
+ Complete all remaining required fields, then click the Create
+ Patch button. View standard SUSE Alerts for examples of
+ properly completed fields.
+
+
+
+
+ Patch management distinguishes between published and unpublished patches.
+
+
+
+
+
+ Published: displays the patch alerts the
+ organization has created and disseminated. To edit an existing
+ published patch, follow the steps described in . To distribute the patch, click
+ Send Notification on the top-right corner of
+ the Patch Details page. The patch alert is sent
+ to the administrators of all affected systems.
+
+
+
+
+ Unpublished: displays the patch alerts your
+ organization has created but not yet distributed. To edit an
+ existing unpublished patch, follow the steps described in . To publish the patch, click
+ Publish Patch on the top-right corner of the
+ Patch Details page. Confirm the channels
+ associated with the patch and click the Publish
+ Patch button, now in the lower-right corner. The patch
+ alert is moved to the Published page awaiting
+ distribution.
+
+
+
+
+ SUSE Manager administrators can also create patches by cloning an
+ existing one. Cloning preserves package associations and simplifies
+ issuing patches. See for
+ instructions.
+
+
+ To edit an existing patch alert's details, click its advisory on the
+ Patches Management page, make the changes in the
+ appropriate fields of the Details tab, and click the
+ Update Patch button. Click on the
+ Channels tab to alter the patch's channel
+ association. Click on the Packages tab to view and
+ modify its packages.
+
+
+ To delete patches, select their check boxes on the Patches
+ Management page, click the Delete Patches
+ button, and confirm the action. Deleting published patches
+ might take a few minutes.
+
+
+
+
+ Assigning Packages to Patches
+
+ To assign packages to patches, proceed as follows:
+
+
+
+
+ Select a patch, click on the Packages tab, then the
+ Add subtab.
+
+
+
+
+ To associate packages with the patch being edited, select the channel
+ from the View drop-down menu that contains the
+ packages and click View. Packages already
+ associated with the patch being edited are not displayed. Selecting
+ All managed packages presents all available
+ packages.
+
+
+
+
+ After clicking View, the package list for the
+ selected option appears. Note that the page header still lists the
+ patch being edited.
+
+
+
+
+ In the list, select the check boxes of the packages to be assigned to
+ the edited patch and click Add Packages at the
+ bottom-right corner of the page.
+
+
+
+
+ A confirmation page appears with the packages listed. Click
+ Confirm to associate the packages with the patch.
+ The List/Remove subtab of the Managed
+ Patch Details page appears with the new packages listed.
+
+
+
+
+ Once packages are assigned to a patch, the patch cache is updated to
+ reflect the changes. This update is delayed briefly so that users may
+ finish editing a patch before all the changes are made available. To
+ initiate the changes to the cache manually, follow the directions to
+ commit the changes immediately at the top of the
+ page.
+
+
+
+
+ Publishing Patches
+ Web UI
+ Publishing Patches
+
+ After adding packages to the patch, the patch needs to be published to
+ be disseminated to affected systems. Follow this procedure to publish
+ patches:
+
+
+
+
+ On the top navigation bar, click on Patches, then
+ Manage Patches on the left navigation bar.
+
+
+
+
+ Click on Publish Patch. A confirmation page appears,
+ asking you to select the channels in which you wish to make the patch
+ available. Choose the relevant channels.
+
+
+
+
+ Click Publish Patch. The patch published will now
+ appear on the Published page of Manage
+ Patches.
+
+
+
+
+
+
+
+ Cloning Patches
+
+ Web UI
+ Cloning Patches
+
+
+ Patches can be cloned for easy replication and distribution as part of
+ SUSE Manager. Only patches potentially applicable to one of your
+ channels can be cloned. Patches can be applicable to a channel if that
+ channel was cloned from a channel to which the patch applies. To access
+ this functionality, click Patches on the top
+ navigation bar, then Clone Patches on the left
+ navigation bar.
+
+
+ On the Clone Patches page, select the channel
+ containing the patch from the View drop-down menu and
+ click View. Once the patch list appears, select the
+ check box of the patch to be cloned and click Clone
+ Patch. A confirmation page appears with the patch listed.
+ Click Confirm to finish cloning.
+
+
+ The cloned patch appears in the Unpublished patch
+ list. Verify the patch text and the packages associated with that patch,
+ then publish the patch so it is available to users in your organization.
+
+
+
+
+
+ Channels
+
+
+
+ Web UI
+ Software and Configuration Files
+ channels
+ WebLogic
+ channels
+
+
+ If you click the Channels tab on the top navigation
+ bar, the Channels category and links appear. The pages
+ in the Channels category enable you to view and manage
+ the channels and packages associated with your systems.
+
+
+ Once DEV is complete for channel management chapter add a link here
+
+
+
+ Software Channels
+
+
+ base channel
+
+
+ channel list
+
+
+ child channel
+
+
+ WebLogic
+ channel list
+
+
+
+ The Software Channels page is the first to appear in
+ the Channels category. A software channel provides
+ packages grouped by products or applications to ease the selection of
+ packages to be installed on a system.
+
+
+
+ There are two types of software channels: base channels and child
+ channels.
+
+
+
+ Base Channels
+ channels
+ base
+
+ A base channel consists of packages built for a specific architecture
+ and release. For example, all of the packages in SUSE Linux Enterprise Server 12 for
+ the x86_64 architecture make up a base channel. The packages in
+ SUSE Linux Enterprise Server 12 for the s390x architecture make up a different base
+ channel.
+
+
+ A system must be subscribed to exactly one base channel, which is assigned
+ automatically during registration based on the SUSE Linux Enterprise release and
+ system architecture. In the case of paid base channels, an associated
+ subscription must exist.
+
+ toms 2011-02-07: Need to add a section about Long Term
+ Support?
+
+
+
+
+ Child Channels
+ channels
+ child
+
+ A child channel is associated with a base channel and provides extra
+ packages. For instance, an organization can create a child channel
+ associated with SUSE Linux Enterprise Server on x86_64 architecture that contains extra
+ packages for a custom application.
+
+
+ A system can be subscribed to multiple child channels of its base
+ channel. Only packages provided by a subscribed channel can be
+ installed or updated. SUSE Manager Administrators and Channel
+ Administrators have channel management authority. This authority
+ gives them the ability to create and manage their own custom
+ channels.
+
+
+
+ Do not create child channels containing packages that are not
+ compatible with the client system.
+
+
+ toms 2010-12-17 @DEV: Is this list of packages still correct?
+Text from the rh docs:
+
+ In addition, your child channels should not contain copies of content
+ from the <guilabel>rhn-tools</guilabel> or
+ <guilabel>rhel-virtualization</guilabel> channels because packages from
+ those channels are used to identify these channel when auto-subscribing
+ systems using the Web user interface. The specific packages are
+ <filename>rhncfg</filename> (used to identify the
+ <guilabel>rhn-tools</guilabel> channel) and
+ <filename>libvirt</filename> (used to identify the
+ <guilabel>rhel-vt</guilabel> channel).
+
+
+
+
+ Channels can be further distinguished by relevance: All
+ Channels, SUSE Channels,
+ Popular Channels, My Channels,
+ Shared Channels, and Retired
+ Channels.
+
+
+
+
+ All Channels
+ channels
+ all
+
+ Under Software Channels in the left navigation bar
+ click All Channels to reach the page shown in
+ . All channels available to your
+ organization are listed. Links within this list go to different tabs of
+ the Software Channel Details page. Clicking on a
+ channel name takes you to the Details tab. Clicking
+ on the number of packages takes you to the Packages
+ tab. Clicking on the number of systems takes you to the
+ Subscribed Systems tab. Refer to
+ for details.
+
+
+
+
+
+ SUSE Channels
+ channels
+ Novell
+
+ The SUSE Channels page displays the SUSE
+ channels and their available child channels.
+
+
+ SUSE Channels Cannot be Deleted
+
+ Once imported, SUSE channels cannot be deleted. Only custom
+ software channels can be deleted.
+
+
+
+
+
+ Popular Channels
+ channels
+ popular
+
+ The Popular Channels page displays the software
+ channels most subscribed by systems registered to your organization. You
+ can refine the search by using the drop-down menu to list only the
+ channels with at least a certain number of systems subscribed.
+
+
+
+
+ My Channels
+ channels
+ my
+
+ The My Channels page displays all software channels
+ that belong to your organization, including both SUSE and custom
+ channels. Use the text box to filter by channel name.
+
+
+
+
+ Shared Channels
+ channels
+ shared
+
+ The Shared Channels page displays the channels shared
+ with others in the organizational trust.
+
+
+
+
+
+ Retired Channels
+ channels
+ retired
+
+ The Retired Channels page displays available channels
+ that have reached their end-of-life dates and do not receive updates.
+
+
+
+
+ Software Channel Details
+ channel list
+ channel details
+ WebLogic
+ software channel details
+
+ If you click on the name of a channel, the Software Channel
+ Details page appears. Here the following tabs are available:
+
+
+ Software Channel Details > Details
+
+ General information about the channel and its parent, if applicable.
+ The summary, description, and architecture are also displayed when
+ clicking on a channel.
+
+
+ In addition, Per-User Subscription
+ Restrictions can be set globally by SUSE Manager
+ administrators and channel administrators. By default, any user can
+ subscribe channels to a system. To manage user permissions, select
+ Only selected users within your organization may subscribe to
+ this channel and click Update. The
+ Subscribers tab appears. Click on it to grant
+ specific users subscription permissions to a channel. SUSE Manager
+ administrators and channel administrators can always subscribe any
+ channels to a system.
+
+
+ Only customers with custom base channels can change their
+ systems' base channel assignments via the SUSE Manager Web interface in
+ two ways:
+
+
+
+
+ Assign the system to a custom base channel.
+
+
+
+
+ Revert subscriptions from a custom base channel to the appropriate
+ distribution-based base channel.
+
+
+
+
+
+ The assigned base channel must match the installed system. For
+ example, a system running SUSE Linux Enterprise 11 for x86_64 cannot be
+ registered to a SUSE Linux Enterprise 12 for s390x base channel. Use the
+ files /etc/os-release or
+ /etc/SuSE-release to check your product,
+ architecture (try uname -a), version, and patch
+ level.
+
+
+
+
+ Software Channel Details > Managers
+ software channels
+ managers
+
+ On the Managers page, you can check which users are
+ authorized to manage the selected channel. Real name and email address
+ are listed with the user names. Organization and Channel administrators
+ can manage any channel. As a SUSE Manager administrator you can change
+ roles for specific users by clicking on the name. For more information
+ on user management and the User Details page, see
+ .
+
+
+
+ Software Channel Details > Patches
+ software channels
+ patches
+
+ This page lists patches to be applied to packages provided in the
+ channel. The list displays advisory types, names, summaries, and issue
+ dates. Clicking on an advisory name takes you to its Patch
+ Details page. Refer to
+ for more information.
+
+
+
+ Software Channel Details > Packages
+ software channels
+ packages
+ package installation
+ filter
+
+ This page lists packages in the channel. Clicking on a package name
+ takes you to the Package Details page. This page
+ displays a set of tabs with information about the package, including
+ architectures on which it runs, the package size, build date, package
+ dependencies, change log, list of files in the package, newer versions,
+ and which systems have the package installed. Download the packages as
+ RPMs.
+
+
+
+ To search for a specific package or a subset of packages, use the
+ package filter at the top of the list. Enter a substring to search for
+ package names containing the string. For example, typing
+ dd in the filter might return:
+ dd_rescue,
+ ddclient, and
+ uuidd. The filter is case-insensitive.
+
+
+
+
+ Software Channel Details > Subscribed Systems
+
+ The list displays system names and their system
+ type. Clicking on a system name takes you to its System
+ Details page. Refer to for more information.
+
+
+
+ Software Channel Details > Target Systems
+
+ List of systems eligible for subscription to the channel. This
+ tab appears only for child channels. Use the check boxes to select the
+ systems, then click the Confirm and
+ Subscribe button on the bottom right-hand corner.
+ You will receive a success message or be notified of any errors. This
+ can also be accomplished through the Channels tab of
+ the System Details page. Refer to
+ for more information.
+
+
+
+
+
+
+ Package Search
+
+
+ SUSE Manager Administrator
+ package search
+
+
+
+ SUSE Manager Administrator
+
+ searching
+
+ WebLogic
+
+ software search
+
+
+
+
+
+ The Package Search page allows you to search through
+ packages using various criteria provided by the What to search
+ for selection list:
+
+
+
+
+
+ Free Form — a general keyword search useful when
+ the details of a particular package and its contents are unknown.
+
+
+
+
+ Name Only — Targeted search to find a specific
+ package known by name.
+
+
+
+
+ Name and Summary — Search for a package or program
+ which might not show up in the respective package name but in its
+ one-line summary.
+
+
+
+
+ Name and Description — Search package names and
+ their descriptions. Search results for web browser
+ include both graphical and text-based browsers.
+
+
+
+
+
+ The Free Form field additionally allows you to search
+ using field names that you prepend to search queries and filter results
+ by that field keyword.
+
+
+
+ For example, if you wanted to search all of the SUSE Linux Enterprise packages for the
+ word java in the description and summary, type the
+ following in the Free Form field:
+
+
+summary:java and description:java
+
+
+ Other supported field names include:
+
+
+
+
+
+ name: search package names for a particular keyword,
+
+
+
+
+ version: search for a particular package version,
+
+
+
+
+ filename: search the package filenames for a
+ particular keyword,
+
+
+
+
+ description: search the packages' detailed
+ descriptions for a particular keyword,
+
+
+
+
+ summary: search the packages' brief summary for a
+ particular keyword,
+
+
+
+
+ arch: search the packages by their architecture
+ (such as x86_64, ppc64le, or s390).
+
+
+
+
+
+ You can also limit searches to Channels relevant to your
+ systems by clicking the check box. Additionally, you can
+ restrict your search by platform (Specific channel you have
+ access to) or architecture (Packages of a specific
+ architecture ...).
+
+
+
+
+ Manage Software Channels
+
+
+ software channels
+ managing
+
+
+
+ This tab allows administrators to create, clone, and delete custom
+ channels. These channels may contain altered versions of
+ distribution-based channels or custom packages.
+
+
+
+ Manage Software Channels > Channel Details
+
+ The default screen of the Manage Software Channels
+ tab lists all available channels including custom, distribution-based,
+ and child channels.
+
+
+ To clone an existing channel, click the Clone
+ Channel link. Select the channel to be cloned from the
+ drop-down menu, select whether to clone the current state (including
+ patches) or the original state (without patches). You can also select
+ specific patches to use for cloning. Then click the Create
+ Channel button. In the next screen select options for the new
+ channel, including base architecture and GPG, then click Create
+ Channel.
+
+
+ To create a new channel, click the Create Channel
+ link. Select the appropriate options for the new channel, including base
+ architecture and GPG options, then click Create
+ Channel. Note that a channel created in this manner is blank,
+ containing no packages. You must either upload software packages or add
+ packages from other repositories. You may also choose to include patches
+ in your custom channel.
+
+
+ Manage Software Channels > Channel Details > Details
+
+ This screen lists the selections made during channel creation.
+
+
+
+ Manage Software Channels > Channel Details > Managers
+
+ SUSE Manager administrators and channel administrators may alter or
+ delete any channel. To grant other users rights to alter or delete this
+ channel, check the box next to the user's name and click
+ Update.
+
+
+ To allow all users to manage the channel, click the Select
+ All button at the bottom of the list then click
+ Update. To remove a user's right to manage the
+ channel, uncheck the box next to their name and click
+ Update.
+
+
+
+
+ Manage Software Channels > Channel Details > Patches
+
+ Channel managers can list, remove, clone, and add patches to their
+ custom channel. Custom channels not cloned from a distribution may not
+ contain patches until packages are available. Only patches that match
+ the base architecture and apply to a package in that channel may be
+ added. Finally, only cloned or custom patches may be added to custom
+ channels. Patches may be included in a cloned channel if they are
+ selected during channel creation.
+
+
+ The Sync tab lists patches that were updated since
+ they were originally cloned in the selected cloned channel. More
+ specifically, a patch is listed here if and only if:
+
+
+
+
+ it is a cloned patch,
+
+
+
+
+ it belongs to the selected cloned channel,
+
+
+
+
+ it has already been published in the selected cloned channel,
+
+
+
+
+ it does not contain a package that the original patch has, or it has
+ at least one package with a different version with respect to the
+ corresponding one in the original patch, or both.
+
+
+
+
+ Clicking on the Sync Patches button opens a
+ confirmation page in which a subset of those patches can be
+ selected for synchronization. Clicking on the
+ Confirm button in the confirmation page results
+ in such patches being copied over from the original channel to the
+ cloned channel, thus updating corresponding packages.
+
+
+
+ Manage Software Channels > Channel Details > Packages
+
+ As with patches, administrators can list, remove, compare, and add
+ packages to a custom channel.
+
+
+ To list all packages in the channel, click the List / Remove
+ Packages link. Check the box to the left of any package you
+ wish to remove, then click Remove Packages.
+
+
+ To add packages, click the Add Packages link. From
+ the drop down menu choose a channel from which to add packages and
+ click View to continue. Check the box to the left of
+ any package you wish to add to the custom channel, then click
+ Add Packages.
+
+
+ To compare packages in the current channel with those in another,
+ select that channel from the drop-down menu and click
+ Compare. Packages in both channels are compared,
+ including architecture and version. The results are displayed on the
+ next screen.
+
+
+ To make the two channels identical, click the Merge
+ Differences button. In the next dialog, resolve any
+ conflicts. Preview Merge allows you to review the
+ changes before applying them to the channels. Select those packages
+ that you wish to merge. Click Merge Packages then
+ Confirm to perform the merge.
+
+
+
+ Manage Software Channels > Channel Details > Repositories
+
+ On the Repositories page, assign software
+ repositories to the channel and synchronize repository content:
+
+
+
+
+ Add/Remove lists configured repositories, which
+ can be added and removed by selecting the check box next to the
+ repository name and clicking Update Repositories.
+
+
+
+
+ Sync lists configured repositories. The
+ synchronization schedule can be set using the drop-down boxes, or an
+ immediate synchronization can be performed by clicking Sync
+ Now.
+
+
+
+
+ The Manage Repositories tab to the left shows all
+ assigned repositories. Click on a name to see details and possibly
+ delete a repository.
+
+
+
+
+
+ Manage Software Channels > Manage Software Packages
+
+ To manage custom software packages, list all software or view only
+ packages in a custom channel. Select the respective channel from the
+ drop-down menu and click View Packages.
+
+
+
+
+ Manage Software Channels > Manage Repositories
+
+ Add or manage custom or third-party package repositories and link the
+ repositories to an existing channel. The repositories feature currently
+ supports repomd repositories.
+
+
+ To create a new repository click the Create
+ Repository link at the top right of the Manage
+ Repositories page. The Create Repository
+ screen prompts you to enter a Repository Label such
+ as sles-12-x86_64 and a Repository
+ URL. You may enter URLs pointing to mirror lists or direct
+ download repositories, then click Create Repository.
+
+
+ To link the new repository to an existing software channel, select
+ Manage Software Channels from the left menu, then
+ click the channel you want to link. In the channel's detail page, click
+ the Repositories subtab, then check the box next to
+ the repository you want to link to the channel. Click Update
+ Repositories.
+
+
+ To synchronize packages from a custom repository to your channel, click
+ the Sync link from the channel's
+ Repositories subtab, and confirm by clicking the
+ Sync button.
+
+
+ You can also perform a sync via command-line by using the
+ spacewalk-repo-sync command, which additionally
+ allows you to accept keys.
+
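+
+ A minimal sketch of a manual sync from the command line; the channel label is
+ an example and must match one of your own channels:
+
+spacewalk-repo-sync --channel sles12-sp1-updates-x86_64
+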
+
+ spacewalk-repo-sync creates log files in the
+ /var/log/rhn/reposync directory. SUSE Manager
+ uses one log file per channel and reuses it with the next sync run.
+
+
+
+
+
+
+
+
+ Distribution Channel Mapping
+
+
+
+
+
+ Audit
+
+ Select the Audit tab from the top navigation bar to audit your managed
+ systems.
+
+
+ CVE Audit
+
+
+
+
+
+ CVE audit
+ SUSE Manager data
+
+
+ The CVE
+ Audit page will display a list of client systems with their patch status regarding a
+ given CVE (Common Vulnerabilities and Exposures) number.
+
+
+
+
+ Normal Usage
+
+ CVE Audit
+ usage
+ Proceed as follows if you want to verify that a client system has received a given CVE
+ patch:
+
+
+ Make sure that the CVE data is up-to-date. For more information, see .
+
+
+
+ Click the Audit tab to open the CVE
+ Audit page.
+
+
+ Enter a 13-character CVE identifier in the CVE Number field. The year
+ setting will be adjusted automatically. Alternatively, set the year manually and add the last
+ four digits.
+
+
+ Optionally, uncheck the patch statuses you are not interested in.
+
+
+ Click Audit systems.
+
+
+
+ Performing this procedure will result in a list of client systems,
+ where each system comes with a Patch Status
+ for the given CVE identifier. Possible statuses are:
+
+
+
+
+
+ [red]
+
+
+
+
+
+
+
+ — Affected, patches are available in
+ channels that are not assigned:
+
+ The system is affected by the vulnerability and SUSE Manager has one or more patches for
+ it, but at this moment, the channels offering the patches are not assigned to the system.
+
+
+
+
+
+
+ [orange]
+
+
+
+
+
+
+
+ — Affected, at least one patch available in
+ an assigned channel:
+
+ The system is affected by the vulnerability, and SUSE Manager has at least one patch for it in
+ a channel that is directly assigned to the system.
+
+
+
+
+
+ [grey]
+
+
+
+
+
+
+
+ — Not affected:
+
+ The system does not have any packages installed that are patchable.
+
+
+
+
+
+ [green]
+
+
+
+
+
+
+
+ — Patched:
+
+ A patch has already been installed.
+
+
+
+ Keep the following in mind:
+
+
+
+ More than one patch might be needed to fix a certain vulnerability.
+
+
+ The
+
+ [orange]
+
+
+
+
+
+
+
+ state is displayed as soon as SUSE Manager has at least one patch in an
+ assigned channel. This might mean that, after installing such a patch, others might still be
+ needed. Users should double-check the CVE Audit page after applying a patch to make sure
+ that their systems are no longer affected.
+
+
+ For more precise definitions of these states, see .
+
+ Unknown CVE Number
+ If the CVE number is not known to SUSE Manager, an error message is displayed because
+ SUSE Manager is unable to collect and display any audit data.
+
+ For each system, the Next Action column contains suggestions on the
+ steps to take to address the vulnerabilities, typically either installing
+ a certain patch or assigning a new channel. If applicable, a list of
+ candidate channels or patches is displayed for your convenience.
+ You can also assign systems to a System Set for further batch
+ processing.
+
+
+
+ API Usage
+ An API method called audit.listSystemsByPatchStatus is available to run
+ CVE audits from custom scripts. Details on how to use it are available in the API guide.
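+
+ The following is a minimal sketch in the style of the other API examples in
+ this guide; the server URL, the credentials, and the exact parameter list are
+ assumptions and should be verified against the API guide:
+
+#!/usr/bin/python
+# Minimal sketch; verify the exact signature in the API guide.
+import xmlrpclib
+
+client = xmlrpclib.Server('https://manager.example.com/rpc/api')
+key = client.auth.login('username', 'password')
+# Assumed call: session key plus a CVE identifier (identifier taken from this guide)
+results = client.audit.listSystemsByPatchStatus(key, 'CVE-2006-4535')
+for entry in results:
+    print entry
+client.auth.logout(key)
+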
+
+
+
+ Maintaining CVE Data
+ To produce correct results, CVE Audit must periodically refresh the data needed for the
+ search in the background. By default, the refresh is scheduled at 11:00 PM every night. It is
+ recommended to run such a refresh right after the SUSE Manager installation to get proper results
+ immediately instead of waiting until the next day.
+
+
+ In the Web interface, click the Admin tab.
+
+
+ Click Task Schedules in the left menu.
+
+
+ Click the cve-server-channels-default schedule link.
+
+
+ Click the cve-server-channels-bunch link.
+
+
+ Click the Single Run Schedule button.
+
+
+ After some minutes, refresh the page and check that the scheduled run status is
+ FINISHED.
+
+
+
+ A direct link is also available in the CVE Audit
+ tab (extra CVE data update).
+
+
+
+
+ Tips and Background Information
+ Audit results are only correct if the assignment of channels to systems did not change
+ since the last scheduled refresh (normally at 11:00 PM every night). If a CVE audit is needed
+ and channels were assigned or unassigned to any system during the day, a manual run is
+ recommended. For more information, see .
+ Systems are called affected, not affected, or
+ patched not in an absolute sense, but based on information available to
+ SUSE Manager. This implies that concepts such as being affected by a vulnerability
+ have particular meanings in this context. The following definitions apply:
+
+
+ System affected by a certain vulnerability:
+
+ A system which has an installed package with version lower than the version of the same
+ package in a relevant patch marked for the vulnerability.
+
+
+
+ System not affected by a certain vulnerability:
+
+ A system which has no installed package that is also in a relevant patch marked for the
+ vulnerability.
+
+
+
+ System patched for a certain vulnerability:
+
+ A system which has an installed package with version equal to or greater than the
+ version of the same package in a relevant patch marked for the vulnerability.
+
+
+
+ Relevant patch:
+
+ A patch known by SUSE Manager in a relevant channel.
+
+
+
+ Relevant channel:
+
+ A channel managed by SUSE Manager, which is either assigned to the system, the original of
+ a cloned channel which is assigned to the system, a channel linked to a product which is
+ installed on the system or a past or future service pack channel for the system.
+
+
+
+ A notable consequence of the above definitions is that results can be incorrect in cases
+ of unmanaged channels, unmanaged packages, or non-compliant systems.
+
+
+
+
+
+ Subscription Matching
+
+ To match subscriptions with your systems, use the subscription-matcher tool. It
+ gathers information about systems, subscriptions, and pinned matches (fixed, customer-defined
+ subscription-to-system mappings) as input and returns the best possible match according to the
+ SUSE Terms and Conditions. The subscription-matcher can also
+ write CSV reports:
+
+
+ The Subscriptions Report provides data about your subscriptions and
+ how they are used
+
+
+ The Unmatched Products Report provides information on products
+ and their systems when a match to a subscription cannot be found
+
+
+ The Error Report provides a list of errors raised during the
+ matching process
+
+
+
+ Selecting
+ Audit
+ Subscription Matching
+ from the left navigation bar will provide you with an overview of all results
+ generated by the Subscription Matcher. The Subscription Matcher helps provide visual coverage of
+ subscription usage and enables more accurate reporting.
+
+
+ Subscription Matcher Accuracy
+ This tool's goal is to help provide visual coverage of current subscription use and enable
+ more accurate reporting. The Subscription Matcher is excellent at matching
+ systems and products registered with SUSE Manager; however, any systems, products, or environments
+ that are not found in the database will remain unmatched. This tool was never intended to act
+ as a replacement for auditing. Auditing should always take precedence over subscription
+ matching.
+
+
+ Coexistence of virtual-host-gatherer and Poller
+ virtual-host-gatherer should not be used for gathering data from hypervisors that are
+ registered systems in SUSE Manager (running SLES or RES). Information from these hypervisors has
+ already been retrieved using the poller utility, and redundant information from
+ virtual-host-gatherer may cause conflicts.
+
+
+ The Subscription Matching overview provides subscription part numbers, product descriptions,
+ policies, matched total subscriptions used and remaining, and the start and end dates of
+ subscriptions.
+
+
+
+
+
+
+ Matched/Total
+
+ If the total amount of a subscription is fully matched, the quantity column value is
+ highlighted with a yellow warning triangle:
+
+ Warning
+
+
+
+
+
+
+
+
+
+
+
+
+ Expiration Warning
+
+ When a subscription will expire in less than three months, its record is
+ highlighted.
+
+
+
+
+ Expired Subscriptions
+
+ If a subscription has expired, its record is faded.
+
+
+
+
+
+ Subscription Matcher Reports
+
+ SUSE Manager 3 automatically generates up-to-date nightly status reports by matching your
+ SUSE subscriptions with all your registered systems. These reports are stored in
+ /var/lib/spacewalk/subscription-matcher and provided in CSV format.
+ These CSV files may be opened with any mainstream spreadsheet application such as LibreOffice
+ Calc.
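+
+ For example, to see which report files are currently available (the path is
+ the one mentioned above):
+
+ls -l /var/lib/spacewalk/subscription-matcher/
+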
+
+ If you would like these reports to be produced at different times, at a
+ certain frequency, or as a one-time run, edit the
+ Taskomatic settings for the gatherer-matcher, located under Schedule name at:
+ Admin
+ Task Schedules
+ gatherer-matcher-default
+
+
+
+
+
+
+
+ Unmatched Systems
+ Selecting the
+ Subscription Matching
+ Unmatched Products
+ tab provides an overview of all systems the matcher could not find in the database
+ or which were not registered with SUSE Manager. The Unmatched Products overview
+ lists product names and the number of systems with known installed products
+ that remain unmatched to a subscription.
+
+
+
+
+ Show System List
+
+
+ Select to open and display a list of all systems which were
+ detected with an installed product but remain unmatched with a
+ subscription.
+
+
+
+
+
+
+
+
+
+ Subscription Pinning
+ The Subscription Pinning feature allows a user to instruct the subscription-matcher to
+ favour matching a specific subscription with a given system or group of systems. This is
+ achieved by creating Pins. Each pin contains information about the preferred subscription-system
+ match.
+
+
+ Respecting Pins
+ In some cases the algorithm may determine that a specific pin cannot be respected,
+ depending on the subscription's availability and applicability rules. In this case it will be
+ shown as not satisfied.
+
+
+ The Pins table displays a list of all pins. Items in the list contain the status of pins,
+ which can be satisfied, not satisfied and pending
+ next run.
+
+
+
+ A pin is Satisfied if its system and subscription were matched in the
+ last matcher run.
+
+
+ A pin is Not Satisfied if its system and subscription were
+ NOT matched in the last matcher run. This can happen, for example, if
+ the pin violates terms and conditions for subscriptions.
+
+
+ A Pin is in the Pending Next Run state when it needs a new matcher run
+ to be taken into account. After the new run, the pin will become either
+ Satisfied or Not Satisfied.
+
+
+
+
+
+ Click the + Add a Pin button to open the Available
+ Systems window. You may filter systems by name and select a system for the matcher to
+ pin manually.
+
+
+
+ Within the Subscriptions Available for Selected System window click the
+ Save Pin button to raise priority for subscription use on the selected
+ system.
+
+
+
+ Subscription Matching Messages
+ You can review all messages related to Subscription Matching from the
+ Subscription Matching
+ Messages
+ overview.
+
+
+
+
+ Virtual Host Managers
+
+ A prerequisite for matching subscriptions with systems is to have complete information
+ about which virtual systems are running on which virtual host. SUSE Manager gets this
+ information without any further configuration if the hypervisor is a registered system running
+ SUSE Linux Enterprise Server or Red Hat Enterprise Linux. Third-party hypervisors, like VMware
+ or Hyper-V, need further configuration as described in this chapter.
+
+ Third-party hypervisors and hypervisor managers such as VMware vCenter are called "Virtual
+ Host Managers" (VHMs) within SUSE Manager, as they are able to manage one or multiple virtual hosts,
+ which in turn may contain virtual guests. SUSE Manager 3 ships with a tool,
+ virtual-host-gatherer, that can connect to VHMs using their API and request
+ information about virtual hosts. This tool is automatically invoked via Taskomatic nightly;
+ therefore you only need to configure your VHMs, for example via the XMLRPC APIs.
+ virtual-host-gatherer maintains the concept of optional modules, where each
+ module enables a specific Virtual Host Manager.
+
+ On the Subscription Matching overview select Edit Virtual Host
+ Managers. This will take you to the
+ Systems
+ Virtual Host Managers
+ overview. In the upper right you can select either + Add VMware-based
+ Virtual Host Manager or + Add File-based Virtual Host Manager.
+ These modules allow you to match subscriptions to machines managed by a virtual host such as
+ VMware, ESX, ESXi and
+ vCenter. If using an unsupported virtual host you can also create a custom
+ json file to provide virtual-host-gatherer with the required host/guest
+ information.
+
+
+
+ VMware-based Virtual Host Manager
+
+ Select VMware-based Virtual Host Manager to enter the location of your
+ VMware-based virtual host. Enter a Label:, Hostname:,
+ Port:, Username: and Password:.
+ Finally click the + Add Virtual Host Manager button.
+
+
+
+
+
+
+ File Based Virtual Host Manager
+ If you are using an unsupported virtual host manager, you can create and use a file
+ formatted in JSON containing information about a host and all managed guest machines. Select
+ + File Based Virtual Host Manager, then enter a label and the URL leading to the
+ location of this file for virtual-host-gatherer.
+
+
+ The following JSON example shows how this file should look:
+
+ $> virtual-host-gatherer --infile infile.json
+ {
+ "examplevhost": {
+ "10.11.12.13": {
+ "cpuArch": "x86_64",
+ "cpuDescription": "AMD Opteron(tm) Processor 4386",
+ "cpuMhz": 3092.212727,
+ "cpuVendor": "amd",
+ "hostIdentifier": "'vim.HostSystem:host-182'",
+ "name": "11.11.12.13",
+ "os": "VMware ESXi",
+ "osVersion": "5.5.0",
+ "ramMb": 65512,
+ "totalCpuCores": 16,
+ "totalCpuSockets": 2,
+ "totalCpuThreads": 16,
+ "type": "vmware",
+ "vms": {
+ "vCenter": "564d6d90-459c-2256-8f39-3cb2bd24b7b0"
+ }
+ },
+ "10.11.12.14": {
+ "cpuArch": "x86_64",
+ "cpuDescription": "AMD Opteron(tm) Processor 4386",
+ "cpuMhz": 3092.212639,
+ "cpuVendor": "amd",
+ "hostIdentifier": "'vim.HostSystem:host-183'",
+ "name": "10.11.12.14",
+ "os": "VMware ESXi",
+ "osVersion": "5.5.0",
+ "ramMb": 65512,
+ "totalCpuCores": 16,
+ "totalCpuSockets": 2,
+ "totalCpuThreads": 16,
+ "type": "vmware",
+ "vms": {
+ "49737e0a-c9e6-4ceb-aef8-6a9452f67cb5": "4230c60f-3f98-2a65-f7c3-600b26b79c22",
+ "5a2e4e63-a957-426b-bfa8-4169302e4fdb": "42307b15-1618-0595-01f2-427ffcddd88e",
+ "NSX-gateway": "4230d43e-aafe-38ba-5a9e-3cb67c03a16a",
+ "NSX-l3gateway": "4230b00f-0b21-0e9d-dfde-6c7b06909d5f",
+ "NSX-service": "4230e924-b714-198b-348b-25de01482fd9"
+ }
+ }
+ }
+ }
+
+ For more information see the man page on your SUSE Manager server for virtual-host-gatherer:
+
+ # man virtual-host-gatherer
+
+
+
+
+
+ Configuring Virtual Host Managers via XMLRPC API
+ The following APIs allow you to get a list of available virtual-host-manager modules and
+ the parameters they require:
+
+
+ virtualhostmanager.listAvailableVirtualHostGathererModules(session)
+
+
+ virtualhostmanager.getModuleParameters(session, moduleName)
+
+
+ The following APIs allow you to create and delete VHMs. Note that the module parameter
+ map must match the map returned by
+ virtualhostmanager.getModuleParameters for the call to work correctly:
+
+
+ virtualhostmanager.create(session, label, moduleName, parameters)
+
+
+ virtualhostmanager.delete(session, label)
+
+
+ The following APIs return information about configured
+ VHMs:
+
+
+ virtualhostmanager.listVirtualHostManagers(session)
+
+
+ virtualhostmanager.getDetail(session, label)
+
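+
+ As a rough illustration only, creating and listing VHMs from a script could look
+ like the sketch below. The server URL, credentials, module name, and parameter
+ map are assumptions; the parameter map must be adapted to the output of
+ virtualhostmanager.getModuleParameters for the module you use:
+
+#!/usr/bin/python
+# Sketch only; the module name and parameter map below are hypothetical and
+# must match what virtualhostmanager.getModuleParameters returns.
+import xmlrpclib
+
+client = xmlrpclib.Server('https://manager.example.com/rpc/api')
+key = client.auth.login('username', 'password')
+params = {'hostname': 'vcenter.example.com', 'username': 'admin',
+          'password': 'secret', 'port': '443'}
+client.virtualhostmanager.create(key, 'my-vcenter', 'VMware', params)
+print client.virtualhostmanager.listVirtualHostManagers(key)
+client.auth.logout(key)
+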
+
+
+
+
+
+ OpenSCAP
+
+
+ If you click the Audit tab in the top navigation bar
+ and then the OpenSCAP tab on the left navigation
+ bar, an overview of the OpenSCAP Scans appears. SCAP (Security
+ Content Automation Protocol) is a framework to maintain the security
+ of enterprise systems. It mainly performs the following tasks:
+
+
+
+
+ automatically verifies the presence of patches,
+
+
+ checks system security configuration settings,
+
+
+ examines systems for signs of compromise.
+
+
+
+ For a description of the Web interface dialogs, see .
+
+ For instructions and tips on how to best use OpenSCAP with SUSE Manager, refer to . To learn more about OpenSCAP, see the project home page at .
+
+
+ System Security via OpenSCAP
+
+
+ The Security Content Automation Protocol (SCAP) is a
+ standardized compliance checking solution for enterprise-level Linux
+ infrastructures. It is a line of specifications maintained by the National
+ Institute of Standards and Technology (NIST) for maintaining system
+ security for enterprise systems.
+
+
+ SUSE Manager uses OpenSCAP to implement the SCAP
+ specifications. OpenSCAP is an auditing tool that utilizes the Extensible
+ Configuration Checklist Description Format (XCCDF). XCCDF is a standard
+ way of expressing checklist content and defines security checklists. It
+ also combines with other specifications such as Common Platform
+ Enumeration (CPE), Common Configuration Enumeration (CCE), and Open
+ Vulnerability and Assessment Language (OVAL), to create a SCAP-expressed
+ checklist that can be processed by SCAP-validated products.
+
+
+ OpenSCAP Features
+
+
+ OpenSCAP verifies the presence of patches by using content produced by
+ the SUSE Security Team
+ (), checks
+ system security configuration settings and examines systems for signs of
+ compromise by using rules based on standards and specifications.
+
+
+
+ To effectively use OpenSCAP, the following must be available:
+
+
+
+
+ A tool to verify that a system conforms to a standard
+
+
+ SUSE Manager uses OpenSCAP as an auditing feature. It
+ allows you to schedule and view compliance scans for any system.
+
+
+
+
+ SCAP content
+
+
+ SCAP content files defining the test rules can be created from scratch
+ if you understand at least XCCDF or OVAL. XCCDF content is also
+ frequently published online under open source licenses and this
+ content can be customized to suit your needs.
+
+
+ The openscap-content package provides default
+ content guidance for systems via a template.
+
+
+
+
+
+
+
+ SUSE supports the use of templates to evaluate your systems.
+ However, if you create custom content, you do so at your own risk.
+
+
+
+
+ ke 2013-08-28:
+
+ Do we have SCAP content providers? Such as:
+
+ The United States Government Configuration Baseline (USGCB) for
+ RHEL5 Desktop or Community-provided content (openscap-content
+ package)? For more info, see https://access.redhat.com/site/documentation/en-US/Red_Hat_Network_Satellite/5.5/html/User_Guide/chap-Red_Hat_Network_Satellite-User_Guide-OpenSCAP.html
+
+
+
+
+
+ SCAP was created to provide a standardized approach to maintaining system
+ security, and the standards that are used will therefore continually
+ change to meet the needs of the community and enterprise businesses. New
+ specifications are governed by NIST's SCAP Release cycle in order to
+ provide a consistent and repeatable revision work flow. For more
+ information, see .
+
+
+
+ Prerequisites for Using OpenSCAP in SUSE Manager
+
+
+ The following sections describe the server and client prerequisites for
+ using OpenSCAP.
+
+
+
+
+ Package Requirements
+
+
+ For the server: SUSE Manager 1.7 or later.
+
+
+ For the client: the spacewalk-oscap package
+ (available from the SUSE Manager Tools Child Channel).
+
+
+
+
+ Entitlement Requirements
+
+
+ A Management entitlement is required for scheduling scans.
+
+
+
+
+ Other Requirements
+
+
+ Client: Distribution of the XCCDF content to all client machines.
+
+
+
+
+
+
+ You can distribute XCCDF content to client machines using any of the
+ following methods:
+
+
+
+
+
+ Traditional Methods (CD, USB, NFS, scp, ftp)
+
+
+
+
+ SUSE Manager Scripts
+
+
+
+
+ RPMs
+
+
+
+
+
+ Custom RPMs are the recommended way to distribute SCAP content to other
+ machines. RPM packages can be signed and verified to ensure their
+ integrity. Installation, removal, and verification of RPM packages can be
+ managed from the user interface.
+
+
+
+ Performing Audit Scans
+
+
+ OpenSCAP integration in SUSE Manager provides the ability to perform
+ audit scans on client systems. This section describes the available
+ scanning methods.
+
+
+
+ Scans via the Web Interface
+
+
+ To perform a scan via the Web interface, log in to SUSE Manager.
+
+
+
+
+ Click on Systems and select the target system.
+
+
+
+
+ Click on
+ AuditSchedule.
+
+
+
+
+ Fill in the Schedule New XCCDF Scan form. See
+ for more information about the
+ fields on this page.
+
+
+
+ The XCCDF content is validated before it is run on the remote system.
+ Specifying invalid arguments can make spacewalk-oscap fail to validate
+ or run. Due to security concerns the oscap xccdf
+ eval command only accepts a limited set of parameters.
+
+
+
+ Run the mgr_check command to ensure the action is
+ being picked up by the client system.
+
+mgr_check -vv
+
+
+ If the SUSE Manager daemon (rhnsd) or
+ osad is running on the client system, the action
+ will be picked up by these services. To start them if they are not
+ running, use:
+
+service rhnsd start
+
+ or
+
+service osad start
+
+
+
+
+
+ To view the results of the scan, refer to
+ .
+
+
+
+
+
+ Scans via API
+
+
+ To perform an audit scan via API, choose an existing script or create a
+ script for scheduling a system scan through
+ system.scap.scheduleXccdfScan, the front end API,
+ for example:
+
+#!/usr/bin/python
+import xmlrpclib
+
+client = xmlrpclib.Server('https://spacewalk.example.com/rpc/api')
+key = client.auth.login('username', 'password')
+client.system.scap.scheduleXccdfScan(key, 1000010001,
+ '/usr/local/share/scap/usgcb-sled11desktop-xccdf.xml',
+ '--profile united_states_government_configuration_baseline')
+
+ Where:
+
+
+
+
+ 1000010001 is the system ID (sid).
+
+
+
+
+ /usr/local/share/scap/usgcb-sled11desktop-xccdf.xml
+ is the path to the content location on the client system. In this
+ case, it assumes USGCB content in the
+ /usr/local/share/scap directory.
+
+
+
+
+ --profile united_states_government_configuration_baseline is an
+ additional argument for the oscap command. In this
+ case, it is using the USGCB.
+
+
+
+
+
+
+ Run the script on the command-line interface of any system. The system
+ needs the appropriate Python and XML-RPC libraries installed.
+
+
+
+
+ Run the mgr_check command to ensure that the action
+ is being picked up by the client system.
+
+mgr_check -vv
+
+ If the SUSE Manager daemon (rhnsd) or
+ osad is running on the client system, the action
+ will be picked up by one of these services. If they are not running, start them with:
+
+service rhnsd start
+
+ or
+
+service osad start
+
+
+
+
+ Enabling Upload of Detailed SCAP Files
+
+ To make sure detailed information about the scan will be available,
+ activate the upload of detailed SCAP files on the clients to be
+ evaluated. On the Admin page, click on
+ Organization and select one. Click on the
+ Configuration tab and check Enable Upload Of
+ Detailed SCAP Files. This feature generates an additional HTML
+ version when you run a scan. The results will show an extra line like:
+ Detailed Results: xccdf-report.html xccdf-results.xml
+ scap-yast2sec-oval.xml.result.xml.
+
+
+
+
+ Viewing SCAP Results
+
+
+ There are three methods of viewing the results of finished scans:
+
+
+
+
+
+ Via the Web interface. Once the scan has finished, the results should
+ show up on the Audit tab of a specific system. This
+ page is discussed in .
+
+
+
+
+ Via the API functions in handler system.scap (see the sketch after this list).
+
+
+
+
+ Via the spacewalk-report command as follows:
+
+spacewalk-report system-history-scap
+spacewalk-report scap-scan
+spacewalk-report scap-scan-results
+
+
+
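+
+     The following is a minimal sketch of retrieving scan results through
+     the front end API, analogous to the scheduling example above. It
+     assumes the system.scap handler methods
+     listXccdfScans and getXccdfScanRuleResults as documented for the
+     Spacewalk API, and the 'xid' key name is an assumption; verify the
+     exact names and return values against the API documentation of your
+     SUSE Manager version.
+
+#!/usr/bin/python
+import xmlrpclib
+
+client = xmlrpclib.Server('https://spacewalk.example.com/rpc/api')
+key = client.auth.login('username', 'password')
+
+# List all finished XCCDF scans for system 1000010001 (the sid).
+for scan in client.system.scap.listXccdfScans(key, 1000010001):
+    print scan
+    # Rule-level results of a single scan; the 'xid' key name is assumed.
+    # print client.system.scap.getXccdfScanRuleResults(key, scan['xid'])
+
+client.auth.logout(key)
+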
+
+ OpenSCAP SUSE Manager Web Interface
+
+
+ The following sections describe the tabs in the SUSE Manager Web
+ interface that provide access to OpenSCAP and its features.
+
+
+
+ OpenSCAP Scans Page
+
+ Click the Audit tab on the top navigation bar, then
+ OpenSCAP on the left. Here you can view, search for, and compare
+ completed OpenSCAP scans.
+
+
+
+ OpenSCAP > All Scans
+
+
+ All Scans is the default page that appears on the
+ AuditOpenSCAP
+ page. Here you see all the completed OpenSCAP scans you have permission
+ to view. Permissions for scans are derived from system permissions.
+
+
+
+ For each scan, the following information is displayed:
+
+
+
+ System:
+
+
+ the scanned system.
+
+
+
+
+ XCCDF Profile:
+
+
+ the evaluated profile.
+
+
+
+
+ Completed:
+
+
+ time of completion.
+
+
+
+
+ Satisfied:
+
+
+ number of rules satisfied. A rule is considered to be satisfied if
+ the result of the evaluation is either Pass or Fixed.
+
+
+
+
+ Dissatisfied:
+
+
+ number of rules that were not satisfied. A rule is considered
+ Dissatisfied if the result of the evaluation is a Fail.
+
+
+
+
+ Unknown:
+
+
+ number of rules which failed to evaluate. A rule is considered to be
+ Unknown if the result of the evaluation is an Error, Unknown or Not
+ Checked.
+
+
+
+
+
+ The evaluation of XCCDF rules may also return status results like
+ Informational, Not Applicable, or
+ Not Selected. In such cases, the given rule is not
+ included in the statistics on this page. See
+ System
+ DetailsAudit for information
+ on these types of results.
+
+
+
+
+ OpenSCAP > XCCDF Diff
+
+
+ XCCDF Diff is an application that visualizes the comparison of two
+ XCCDF scans. It shows metadata for two scans as well as the lists of
+ results.
+
+
+
+ Click the appropriate icon on the Scans page to access the diff output
+ of similar scans. Alternatively, specify the ID of scans you want to
+ compare.
+
+
+ Items that show up in only one of the compared scans are considered to
+ be "varying". Varying items are always highlighted in beige. There are
+ three possible comparison modes:
+
+
+
+ Full Comparison
+
+
+ all the scanned items.
+
+
+
+
+ Only Changed Items:
+
+
+ items that have changed.
+
+
+
+
+ Only Invariant:
+
+
+ unchanged or similar items.
+
+
+
+
+
+
+
+ OpenSCAP > Advanced Search
+
+
+ Use the Advanced Search page to search through your scans according to
+ specified criteria including:
+
+
+
+
+ rule results,
+
+
+
+
+ targeted machine,
+
+
+
+
+ time frame of the scan.
+
+
+
+
+
+ The search returns either a list of rule results or a list of the
+ scans in which those results are included.
+
+
+
+
+
+ Systems Audit Page
+
+ To display a system's audit page, click
+ Systemssystem_nameAudit.
+ Use this page to schedule and view compliance scans for a particular
+ system. Scans are performed by the OpenSCAP tool, which implements
+ NIST's standard Security Content Automation Protocol (SCAP). Before you
+ scan a system, make sure that the SCAP content is prepared and all
+ prerequisites in are met.
+
+
+ List Scans
+
+
+ This subtab lists a summary of all scans completed on the system. The
+ following columns are displayed:
+
+
+
+
+
+ XCCDF Test Result
+
+
+ The scan test result name, which provides a link to the detailed
+ results of the scan.
+
+
+
+
+ Completed
+
+
+ The exact time the scan finished.
+
+
+
+
+ Compliance
+
+
+ The unweighted pass/fail ratio of compliance based on the Standard
+ used.
+
+
+
+
+ P
+
+
+ Number of checks that passed.
+
+
+
+
+ F
+
+
+ Number of checks that failed.
+
+
+
+
+ E
+
+
+ Number of errors that occurred during the scan.
+
+
+
+
+ U
+
+
+ Unknown.
+
+
+
+
+ N
+
+
+ Not applicable to the machine.
+
+
+
+
+ K
+
+
+ Not checked.
+
+
+
+
+ S
+
+
+ Not Selected.
+
+
+
+
+ I
+
+
+ Informational.
+
+
+
+
+ X
+
+
+ Fixed.
+
+
+
+
+ Total
+
+
+ Total number of checks.
+
+
+
+
+
+ Each entry starts with an icon indicating the results of a comparison
+ to a previous similar scan. The icons indicate the following:
+
+
+
+
+
+ "RHN List Checked" Icon — no difference between the compared scans.
+
+
+
+
+ "RHN List Alert" Icon — arbitrary differences between the compared
+ scans.
+
+
+
+
+ "RHN List Error" Icon — major differences between the compared
+ scans. Either there are more failures than the previous scan or less
+ passes
+
+
+
+
+ "RHN List Check In" Icon — no comparable scan was found, therefore,
+ no comparison was made.
+
+
+
+
+ To find out what has changed between two scans in more detail, select
+ the ones you are interested in and click Compare Selected
+ Scans. To delete scans that are no longer relevant, select
+ those and click on Remove Selected Scans. Scan
+ results can also be downloaded in CSV format.
+
+
+
+ Scan Details
+
+ The Scan Details page contains the results of a single scan. The page
+ is divided into two sections:
+
+
+
+ Details of the XCCDF Scan
+
+
+ This section displays various details about the scan, including:
+
+
+
+
+ File System Path: the path to the XCCDF file used for the scan.
+
+
+
+
+ Command-line Arguments: any additional command-line arguments that
+ were used.
+
+
+
+
+ Profile Identifier: the profile identifier used for the scan.
+
+
+
+
+ Profile Title: the title of the profile used for the scan.
+
+
+
+
+ Scan's Error output: any errors encountered during the scan.
+
+
+
+
+
+
+ XCCDF Rule Results
+
+
+ The rule results provide the full list of XCCDF rule identifiers,
+ identifying tags, and the result for each of these rule checks. This
+ list can be filtered by a specific result.
+
+
+
+
+
+
+ Schedule Page
+
+ Use the Schedule New XCCDF Scan page to schedule new scans for specific
+ machines. Scans occur at the system's next scheduled check-in that
+ occurs after the date and time specified. The following fields can be
+ configured:
+
+
+
+ Command-line Arguments:
+
+
+ Optional arguments to the oscap command, either:
+
+
+
+
+ --profile PROFILE: Specifies a particular
+ profile from the XCCDF document.
+
+
+ Profiles are determined by the Profile tag in the XCCDF XML file.
+ Use the oscap command to see a list of profiles
+ within a given XCCDF file, for example:
+
+# oscap info /usr/local/share/scap/dist_sles12_scap-sles12-oval.xml
+Document type: XCCDF Checklist
+Checklist version: 1.1
+Status: draft
+Generated: 2015-12-12
+Imported: 2016-02-15T22:09:33
+Resolved: false
+Profiles: SLES12-Default
+
+ If not specified, the default profile is used. Some early
+ versions of OpenSCAP require that you use the
+ --profile option, otherwise the scan will fail.
+
+
+
+
+ --skip-valid: Do not validate input and output
+ files. You can use this option to bypass the file validation
+ process if you do not have well-formed XCCDF content.
+
+
+
+
+
+
+ Path to XCCDF Document:
+
+
+ This is a required field. The path parameter points to the XCCDF
+ content location on the client system. For example:
+ /usr/local/scap/dist_rhel6_scap-rhel6-oval.xml
+
+
+
+ The XCCDF content is validated before it is run on the remote
+ system. Specifying invalid arguments can cause
+ spacewalk-oscap to fail to validate or run. Due
+ to security concerns, the oscap xccdf eval
+ command only accepts a limited set of parameters.
+
+
+
+
+
+
+ For information about how to schedule scans using the web interface,
+ refer to .
+
+
+
+
+
+
+
+
+ Configuration
+
+
+
+ Only Configuration Administrators or SUSE Manager Administrators see the
+ Configuration tab.
+
+
+ In this configuration portal, you manage systems with configuration
+ files, channels offering configuration files, and the configuration
+ files themselves. Centrally-managed files are available to multiple
+ systems; locally-managed files are available to individual systems
+ only.
+
+
+
+ Preparing Systems for Config Management
+
+ changing email address
+
+ system preparation
+
+
+ To manage a system's configuration with SUSE Manager, it must have the
+ appropriate tools and the config-enable file
+ installed. These tools should be available if you installed the system
+ with the configuration management functionality using AutoYaST or
+ Kickstart. If not, they can be found in the Tools child channel for your
+ distribution. Download and install the latest
+ rhncfg* packages:
+
+
+
+
+
+ rhncfg — the base libraries and functions needed
+ by all rhncfg-* packages,
+
+
+
+
+ rhncfg-actions — the RPM package required to run
+ configuration actions scheduled via SUSE Manager,
+
+
+
+
+ rhncfg-client — the RPM package with a command
+ line interface to the client features of the Configuration Management
+ system,
+
+
+
+
+ rhncfg-management — the RPM package with a command
+ line interface used to manage SUSE Manager configuration.
+
+
+
+
+
+
+
+ Overview
+
+
+ The Configuration Overview shows all of the
+ configuration files that are managed by your organization in SUSE
+ Manager. This list includes files that are managed centrally in
+ configuration channels and files that are managed locally via individual
+ system profiles.
+
+
+
+
+ Configuration Summary
+
+
+ The panel provides quick information about your configuration files.
+ Click on the blue text to the right to display relevant systems,
+ channel details, or configuration files.
+
+
+
+
+ Configuration Actions
+
+
+ Configuration Actions offers direct access to the
+ most common configuration management tasks. View or create files and
+ channels or enable configuration management on your systems.
+
+
+
+
+ Recently Modified Configuration Files
+
+
+ The list shows which files have changed, when they changed, and to
+ which channel they belong. If no files have been changed, no list
+ appears. Click on the
+ name of a file to see its Details page. Click on
+ the channel name to see its Channel Details page.
+
+
+ File types that can appear here:
+
+
+
+
+ Spacewalk Icon Software Channels
+
+
+
+
+
+
+
+ — Centrally-managed configuration file
+ provided by a global configuration channel.
+
+
+
+
+ FA Desktop
+
+
+
+
+
+
+
+ — Locally-managed configuration file,
+ which may override a centrally-managed file.
+
+
+
+
+ Spacewalk Icon Sandbox
+
+
+
+
+
+
+
+ — Sandbox configuration file.
+
+
+
+
+
+
+ Recently Scheduled Configuration File Deployments
+
+
+ Each scheduled action is listed along with the status of the action.
+ Any scheduled configuration task, from enabling configuration
+ management on a system to deploying a specific configuration file, is
+ displayed. Here you can quickly assess if all tasks have been
+ successfully carried out or fix any problems. Clicking on the blue
+ text displays the System
+ DetailsSchedule page for the
+ specified system.
+
+
+
+
+
+
+
+ Configuration Channels
+
+
+ configuration
+ create
+
+
+
+ actions
+
+ create
+
+ configuration
+
+ channel
+
+
+
+ As mentioned above, SUSE Manager manages both central and local
+ configuration channels and files. Central configuration management allows
+ you to deploy configuration files to multiple systems. Local
+ configuration management allows you to specify overrides or configuration
+ files that are not changed by subscribing the system to a central
+ channel.
+
+
+
+ Central configuration channels must be created via the link on this page.
+ Local configuration channels already exist for each system to which a
+ Provisioning entitlement has been applied.
+
+
+
+ Click on the name of the configuration channel to see the details page
+ for that channel. If you click on the number of files in the channel, you
+ are taken to the List/Remove Files page of that
+ channel. If you click on the number of systems subscribed to the
+ configuration channel, you are taken to the
+ SystemsSubscribed
+ Systems page for that channel.
+
+
+
+ To create a new central configuration channel:
+
+
+
+
+
+ Click the Create Config Channel link in the
+ upper right of this screen.
+
+
+
+
+ Enter a name for the channel.
+
+
+
+
+ Enter a label for the channel. This field must contain only
+ alphanumeric characters, "-", "_", and
+ ".".
+
+
+
+
+ Enter a mandatory description for the channel that allows you to
+ distinguish it from other channels. No character restrictions apply.
+
+
+
+
+ Press the Create Config Channel button to create the
+ new channel.
+
+
+
+
+ The following page is a subset of the Channel
+ Details page and has three subtabs:
+ Overview, Add Files, and
+ Systems. The Channel Details page
+ is discussed in
+ .
+
+
+
+
+
+ Configuration > Configuration Channels > Configuration Channel Details
+
+
+ Overview
+
+
+ The Overview subtab of the
+ Configuration Channel Details page is divided
+ into several panels.
+
+
+
+ Channel Information
+
+
+ The panel provides status information for the contents of the channel.
+
+
+
+
+ Configuration Actions
+
+
+ The panel provides access to the most common configuration
+ tasks.
+
+
+
+
+ Channel Properties
+
+
+ By clicking on the Edit Properties link,
+ you can edit the name, label, and description of the channel.
+
+
+
+
+
+
+
+ List/Remove Files
+
+
+ This subtab only appears if there are files in the configuration
+ channel. You can remove files or copy the latest versions into a set
+ of local overrides or into other central configuration channels.
+ Check the box next to files you wish to manipulate and click the
+ respective action button.
+
+
+
+
+ Add Files
+
+
+ The Add Files subtab has three subtabs of its own,
+ which allow you to Upload,
+ Import, or Create configuration
+ files to be included in the channel.
+
+
+
+ Upload File
+
+
+ To upload a file into the configuration channel, browse for the
+ file on your local system, populate all fields, and click the
+ Upload Configuration File button. The
+ Filename/Path field is the absolute path where
+ the file will be deployed.
+
+
+ You can set the Ownership via the user
+ name and group name as well as the
+ Permissions of the file when it is deployed.
+
+
+ If the client has SELinux enabled, you can configure
+ SELinux contexts to enable the required file
+ attributes (such as user, role, and file type) that allow it to be
+ used on the system.
+
+
+ If the configuration file includes a macro (a variable in a
+ configuration file), enter the symbol that marks the beginning
+ and end of the macro. For more information on using macros,
+ see .
+
+
+
+
+ Import Files
+
+
+ To import files from other configuration channels, including any
+ locally-managed channels, check the box to the left of any file
+ you wish to import. Then press the Import Configuration
+ File(s) button.
+
+
+
+ A sandbox icon (
+
+ Spacewalk Icon Sandbox
+
+
+
+
+
+
+
+ ) indicates that the listed file is
+ currently located in a local sandbox. Files in a system's
+ sandbox are considered experimental and could be
+ unstable. Use caution when selecting them for a central
+ configuration channel.
+
+
+
+
+
+ Create File
+
+
+ Create a configuration file, directory, or symbolic link from
+ scratch to be included in the configuration channel.
+
+
+ Creating a Configuration File, Directory, or Symbolic Link From
+ Scratch
+
+
+ Choose whether you want to create a text file, directory, or
+ symbolic link in the File Type section.
+
+
+
+
+ In the Filename/Path text input field,
+ set the absolute path to where the file should be deployed.
+
+
+
+
+ If you are creating a symlink, indicate the target file and
+ path in the Symbolic Link Target
+ Filename/Path input field.
+
+
+
+
+ Enter the User name and Group
+ name for the file in the
+ Ownership section, as well as the
+ File Permissions Mode.
+
+
+
+
+ If the client has SELinux enabled, you can configure
+ SELinux contexts to enable the required
+ file attributes (such as user, role, and file type) that
+ allow it to be used on the system.
+
+
+
+
+ If the configuration file includes a macro, enter the symbol
+ that marks the beginning and end of the macro.
+
+
+
+
+ Then enter the configuration file content in the
+ File Contents field, using the script
+ drop-down menu to choose the appropriate scripting language.
+
+
+
+
+ Press the Create Configuration File
+ button to create the new file.
+
+
+
+
+
+
+
+
+
+ Deploy Files
+
+
+ This subtab only appears when there are files in the channel and
+ a system is subscribed to the channel. Deploy
+ all files by clicking the Deploy All Files button
+ or check selected files and click the Deploy Selected
+ Files button. Select to which systems the file(s) should be
+ applied. All systems subscribed to this channel are listed. If you
+ wish to apply the file to a different system, subscribe it to the
+ channel first. To deploy the files, press Confirm & Deploy
+ to Selected Systems.
+
+
+
+
+ Systems
+
+
+ Manage systems subscribed to the configuration channel via two
+ subtabs:
+
+
+
+ Subscribed Systems
+
+
+ All systems subscribed to the current channel are displayed. Click
+ on the name of a system to see the System
+ Details page.
+
+
+
+
+ Target Systems
+
+
+ This subtab displays a list of systems enabled for configuration
+ management but not yet subscribed to the channel. To add a system
+ to the configuration channel, check the box to the left of the
+ system's name and press the Subscribe System
+ button.
+
+
+
+
+
+
+
+
+
+
+
+ Configuration Files
+
+
+ This dialog allows you to manage your configuration files independently.
+ Both centrally-managed and locally-managed files can be reached from
+ sub-pages.
+
+
+
+ Maximum Size for Configuration Files
+
+ By default, the maximum file size for configuration files is 128 KB
+ (131072 bytes). SUSE supports a configuration file size up to
+ 1 MB; larger values are not guaranteed to work.
+
+
+
+
+
+
+ To change the file size limit, edit all the following files on the
+ SUSE Manager server and edit or add the following variables:
+
+
+ # /usr/share/rhn/config-defaults/rhn_web.conf
+web.maximum_config_file_size = 262144
+
+# /usr/share/rhn/config-defaults/rhn_server.conf
+maximum_config_file_size = 262144
+
+# /etc/rhn/rhn.conf
+web.maximum_config_file_size=262144
+server.maximum_config_file_size=262144
+
+
+ Then restart spacewalk:
+
+# spacewalk-service restart
+
+
+
+
+ Centrally-Managed Files
+
+ Centrally-managed files are available to multiple systems. Changing
+ a file within a centrally-managed channel may result in changes to
+ several systems. Locally-managed files supersede centrally-managed
+ files. For more information about locally-managed files, see .
+
+
+ This page lists all files currently stored in your central
+ configuration channel. Click on the Path of a
+ file to see its Details tab. Click the name of
+ the Configuration Channel to see the channel's
+ Overview tab. Clicking Systems
+ Subscribed shows you all systems currently subscribed to
+ the channel containing that file. Click Systems
+ Overriding to see all systems that have a local (or
+ override) version of the configuration file. The centrally-managed
+ file will not be deployed to those systems.
+
+
+
+
+ Locally-Managed Files
+
+ Locally-managed configuration files apply to only one system. They may
+ be files in the system's sandbox or files that can be deployed to the
+ system at any time. Local files have higher priority than
+ centrally-managed files. If a system is subscribed to a configuration
+ channel with a given file and also has a locally-managed version of that
+ file, the locally-managed version will be deployed.
+
+
+ The list of all local (override) configuration files for your systems
+ includes the local configuration channels and the sandbox channel for
+ each Provisioning-entitled system.
+
+
+ Click the Path of the file to see its Config
+ File Details. Click the name of the system to which it belongs
+ to see its System
+ DetailsConfigurationOverview
+ page.
+
+
+
+
+ Including Macros in your Configuration Files
+ within configuration Files
+ interpolation
+ macros
+ in configuration files
+
+ Being able to store one file and share identical configurations is
+ useful, but what if you have many variations of the same configuration
+ file? What do you do if you have configuration files that differ only in
+ system-specific details, such as host name and MAC address?
+
+
+ Traditional file management would require you to upload and distribute
+ each file separately, even if the differences are minimal and the number
+ of variations runs into the hundreds or thousands. SUSE Manager addresses this
+ by allowing the inclusion of macros, or variables, within the
+ configuration files it manages. In
+ addition to variables for custom system information, the following
+ standard macros are supported:
+
+
+
+
+
+
+ rhn.system.sid
+
+
+
+
+ rhn.system.profile_name
+
+
+
+
+ rhn.system.description
+
+
+
+
+ rhn.system.hostname
+
+
+
+
+ rhn.system.ip_address
+
+
+
+
+ rhn.system.custom_info(key_name)
+
+
+
+
+ rhn.system.net_interface.ip_address(eth_device)
+
+
+
+
+ rhn.system.net_interface.netmask(eth_device)
+
+
+
+
+ rhn.system.net_interface.broadcast(eth_device)
+
+
+
+
+ rhn.system.net_interface.hardware_address(eth_device)
+
+
+
+
+ rhn.system.net_interface.driver_module(eth_device)
+
+
+
+
+ To use this powerful feature, either upload or create a configuration
+ file via the Configuration Channel Details page. Then
+ open its Configuration File Details page and include
+ the supported macros of your choice. Ensure that the delimiters used to
+ offset your variables match those set in the Macro Start
+ Delimiter and Macro End Delimiter fields
+ and do not conflict with other characters in the file. We recommend
+ delimiters that are two characters in length; they must not contain the
+ percent (%) symbol.
+
+
+ For example, you may have a file applicable to all of your servers that
+ differs only in IP address and host name. Rather than manage a separate
+ configuration file for each server, you may create a single file, such
+ as server.conf, with the IP address and host name
+ macros included.
+
+hostname={| rhn.system.hostname |}
+ip_address={| rhn.system.net_interface.ip_address(eth0) |}
+
+
+
+ Upon delivery of the file to individual systems, whether through a
+ scheduled action in the SUSE Manager Web interface or at the command
+ line with the SUSE Manager Configuration Client
+ (mgrcfg-client), the variables will be replaced with
+ the host name and IP address of the system as recorded in SUSE Manager's
+ system profile. In the above example configuration file the deployed
+ version resembles the following:
+
+hostname=test.example.domain.com
+ip_address=177.18.54.7
+
+ To capture custom system information, insert the key label into the
+ custom information macro
+ (rhn.system.custom_info). For instance, if you
+ developed a key labeled "asset", you can
+ add it to the custom information macro in a configuration file to
+ have the value substituted on any system containing it. The macro
+ would look like this:
+
+asset={@ rhn.system.custom_info(asset) @}
+
+ When the file is deployed to a system containing a value for that key,
+ the macro gets translated, resulting in a string similar to the
+ following:
+
+asset=Example#456
+
+ To include a default value, for instance if one is required to prevent
+ errors, you can append it to the custom information macro, like this:
+
+asset={@ rhn.system.custom_info(asset) = 'Asset #' @}
+
+ This default is overridden by the value on any system containing it.
+
+
+ Using the SUSE Manager Configuration Manager
+ (mgrcfg-manager) will not translate or alter files,
+ as this tool is system agnostic. mgrcfg-manager does
+ not depend on system settings. Binary files cannot be interpolated.
+
+
+
+
+
+ Systems
+
+
+ This page displays status information about your system in relation to
+ configuration. There are two sub-pages: Managed Systems
+ and Target Systems.
+
+
+
+ Managed Systems
+
+ By default the
+ ConfigurationManaged
+ Systems
+ page is displayed. The listed systems have been fully prepared for
+ configuration file deployment. The number of locally- and centrally-managed
+ files is displayed. Clicking the name of a system shows its
+ System
+ DetailsConfigurationOverview
+ page. Clicking on the number of local files takes you to the
+ System
+ DetailsConfigurationView/Modify
+ FilesLocally-Managed Files
+ page, where you manage which local (override) files apply to the system.
+ Clicking on the number of centrally-managed files takes you to the
+ System
+ DetailsConfigurationManage
+ Configuration ChannelsList/Unsubscribe from
+ Channels page. Here you unsubscribe from any
+ channels you wish.
+
+
+
+
+ Target Systems
+
+ Here you see the systems either not prepared for configuration file
+ deployment or not yet subscribed to a configuration channel. The table
+ has three columns. The first identifies the system name, the second
+ shows whether the system is prepared for configuration file deployment,
+ and the third lists the steps necessary to prepare the system. To
+ prepare a system, check the box to the left of the profile name then
+ press the Enable SUSE Manager Configuration
+ Management button. All of the preparatory steps that can be
+ automatically performed are scheduled by SUSE Manager.
+
+
+
+ You will have to perform some manual tasks to enable configuration file
+ deployment. Follow the on-screen instructions provided to assist with
+ each step.
+
+
+
+
+
+
+
+ Schedule
+
+
+
+ Schedule
+
+
+ WebLogic
+ Schedule
+
+ If you click the Schedule tab on the top navigation
+ bar, the Schedule category and links appear. These
+ pages enable you to track the actions carried out on your systems. An
+ action is a scheduled task to be performed on one or more client systems.
+ For example, an action can be scheduled to apply all patches to a system.
+ Actions can also be grouped into action chains to schedule them at the
+ same time in a particular order, for example to reboot a system after
+ deploying patches.
+
+
+ SUSE Manager keeps track of the following action types:
+
+
+
+
+ package alteration (installation, upgrade, and removal),
+
+
+
+
+ rollback package actions,
+
+
+
+
+ system reboots,
+
+
+
+
+ patch application,
+
+
+
+
+ configuration file alteration (deploy, upload, and diff),
+
+
+
+
+ hardware profile updates,
+
+
+
+
+ package list profile updates,
+
+
+
+
+ automated installation initiation,
+
+
+
+
+ service pack migrations,
+
+
+
+
+ remote commands.
+
+
+
+
+ Each page in the Schedule category represents an action
+ status.
+
+
+
+ Pending Actions
+
+
+ SUSE Manager Administrator
+ Pending Actions
+
+
+
+ As shown in , the Pending
+ Actions page appears by default when clicking
+ Schedule in the top navigation bar. It displays
+ actions not yet started or still in progress.
+
+
+
+
+ To cancel an action, select the action, and click Cancel
+ Actions, then Confirm.
+
+
+
+
+
+ Failed Actions
+
+ SUSE Manager Administrator
+
+ Failed Actions
+
+
+ Sometimes actions cannot be completed. If the action returns an error, it
+ is displayed here.
+
+
+
+
+ Completed Actions
+
+
+ SUSE Manager Administrator
+ Completed Actions
+
+
+
+ List of actions successfully carried out.
+
+
+
+ Archived Actions
+
+
+ SUSE Manager Administrator
+ Archived Actions
+
+
+
+ If you selected actions to store for review, they are displayed here and
+ can be deleted.
+
+
+
+
+ Action Chains
+
+
+ SUSE Manager Administrator
+ Action Chains
+
+
+
+ You can create action chains (grouped actions), for
+ example, in the
+ SoftwarePackages
+ or
+ SoftwarePackages
+ subtabs on a system details page (see or ) or in the
+ ConfigurationDeploy
+ Files subtab on a system details page (see
+ ).
+
+
+
+
+
+
+ All created action chains are displayed in . In the Action
+ Chain List you can click the label to view or edit an
+ Action Chain as displayed in .
+
+
+
+ In the top right corner is the
+ delete action chain link.
+
+ To add actions to an existing chain, pick up a
+ chainable
action (such as running a remote command)
+ from a system details page (see . Then check Add to Action
+ Chain and select an action chain from the pull-down list.
+ Confirm with Schedule.
+
+
+
+
+
+
+ To create a new action chain, configure the first action, then select
+ Add to Action Chain instead of Schedule
+ no sooner than. Click on the drop-down menu, enter a name,
+ and click Schedule to save the chain. Then proceed
+ to the next action and add it to the new chain.
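+
+     Action chains can also be driven through the front end API. The
+     following minimal sketch assumes the actionchain XML-RPC handler and
+     the method names createChain, addSystemReboot, and scheduleChain as
+     documented for the Spacewalk API; check the API documentation shipped
+     with your SUSE Manager version before relying on them.
+
+#!/usr/bin/python
+import xmlrpclib
+from datetime import datetime
+
+client = xmlrpclib.Server('https://spacewalk.example.com/rpc/api')
+key = client.auth.login('username', 'password')
+
+# Create a chain, append a reboot action for system 1000010001,
+# then schedule the whole chain for execution.
+client.actionchain.createChain(key, 'example-chain')
+client.actionchain.addSystemReboot(key, 1000010001, 'example-chain')
+client.actionchain.scheduleChain(key, 'example-chain', datetime.now())
+
+client.auth.logout(key)
+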
+
+
+
+
+ Action chains can be edited via the
+ ScheduleAction
+ Chains page. Click on a chain name to see the
+ actions in the order they will be performed. The following tasks can be
+ carried out here:
+
+
+
+
+
+ Changing the order by dragging the respective action to the right
+ position and dropping it.
+
+
+
+
+ Deleting actions from the chain by clicking the delete
+ action link.
+
+
+
+
+ Inspecting the list of systems on which an action is run by clicking
+ the + sign.
+
+
+
+
+ Deleting a single system from an action by clicking the
+ delete system link.
+
+
+
+
+ Deleting the complete chain with the delete action
+ chain link in the top-left corner.
+
+
+
+
+ Changing the action chain label by clicking it.
+
+
+
+
+ Scheduling the action chain for execution after a certain date by
+ clicking the Save and Schedule button.
+
+
+
+
+
+ Unsaved Changes
+
+ If you leave the page without clicking either
+ Save or Save and Schedule all
+ unsaved changes will be discarded. In this case, a confirmation dialog
+ will pop up.
+
+
+
+
+ Currently you cannot add an action to an action chain from the
+ Edit section of the action chain details page
+ (see ). Once a
+ chain is scheduled, the actions it contains will be displayed under
+ Schedule on the appropriate pages:
+ Pending Actions, Failed Actions,
+ or Completed Actions, depending on their status. If
+ one action fails on a system, no other actions from the same chain
+ will be executed on that system. Due to technical limitations, it is
+ not possible to reuse Action Chains.
+
+
+
+
+ Actions List
+
+
+ SUSE Manager Administrator
+ Actions List
+
+
+
+ On each action page, each row in the list represents a single scheduled
+ event or action that might affect multiple systems and involve various
+ packages. The list contains several columns of information:
+
+
+
+
+
+ Filter by Action — Enter a term to filter the
+ listed actions or use the check boxes in this column to select actions.
+ Then either add them to your selection list or archive them by clicking
+ Archive Actions. If you archive a pending action, it
+ is not canceled, but the action item moves from the Pending
+ Actions list to the Archived Actions list.
+
+
+
+
+ Action — Type of action to perform such as Patches
+ or Package Install. Clicking an action name shows its Action
+ Details page. Refer to
+ for more information.
+
+
+
+
+ Scheduled Time — The earliest day and time the
+ action will be performed.
+
+
+
+
+ Succeeded — Number of systems on which this action
+ was successfully carried out.
+
+
+
+
+ Failed — Number of systems on which this action
+ has been tried and failed.
+
+
+
+
+ In Progress — Number of systems on which this
+ action is taking place.
+
+
+
+
+ Total — Total number of systems on which this
+ action has been scheduled.
+
+
+
+
+
+
+ Action Details
+ SUSE Manager Administrator
+ Action Details
+
+ If you click on the name of an action, the Action
+ Details page appears. This page is split into the following
+ tabs.
+
+
+
+ Action Details >
+
+
+ Details
+
+
+ General information about the action. This is the first tab you see
+ when you click on an action. It displays the action type, scheduling
+ administrator, earliest execution, and notes.
+
+
+ Patch Advisory
+
+ Clicking the Patch Advisory takes you to the Patch
+ Details page. The Patch Advisory appears only if the
+ action is a patch. Refer to
+ for more information.
+
+
+
+
+
+ Action Details >
+
+
+ Completed Systems
+
+
+ List of systems on which the action has been successfully performed.
+ Clicking a system name displays its System Details
+ page. Refer to for more
+ information.
+
+
+
+
+ Action Details >
+
+
+ In Progress Systems
+
+
+ List of systems on which the action is now being carried out. To cancel
+ an action, select the system by marking the appropriate check box and
+ click the Unschedule Action button. Clicking a
+ system name shows its System Details page. Refer to
+ for more information.
+
+
+
+
+ Action Details >
+
+
+ Failed Systems
+
+
+ List of systems on which the action has failed. It can be rescheduled
+ here. Clicking a system name takes you to its System
+ Details page. Refer to
+ for more information.
+
+
+
+
+ Action Details >
+
+
+Package List
+
+
+ List of packages associated with this action. The tab appears
+ only if the action is package related (installation, removal, etc.).
+
+
+
+
+
+ Users
+
+ users
+ WebLogic
+ Users
+
+ Only SUSE Manager administrators can see the Users tab
+ on the top navigation bar. If you click the tab, the
+ Users category and links appear. Here you grant and
+ edit permissions for those who administer your system groups. Click on a
+ Username in the user list to modify the user.
+
+
+ To add new users to your organization, click the Create
+ User link on the top right corner of the page. On the
+ Create User page, fill in the required values for the
+ new user.
+
+
+ Once all fields are completed, click the Create Login
+ button. SUSE Manager now sends an email to the specified address and takes
+ you back to the UsersUser
+ ListActive page. If you wish to
+ set permissions and options for the new user, click on the name in the
+ list. The User Details page for this user provides
+ several subtabs of options. Refer to
+ for detailed descriptions of
+ each subtab.
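+
+     Users can also be created with the front end API. A minimal sketch,
+     assuming the user.create and user.addRole calls as documented for
+     the Spacewalk API; the login data and the config_admin role label
+     shown here are purely illustrative.
+
+#!/usr/bin/python
+import xmlrpclib
+
+client = xmlrpclib.Server('https://spacewalk.example.com/rpc/api')
+key = client.auth.login('username', 'password')
+
+# Create the login, then grant it a role before handing it over.
+client.user.create(key, 'jdoe', 'secretpassword', 'John', 'Doe',
+                   'jdoe@example.com')
+client.user.addRole(key, 'jdoe', 'config_admin')
+
+client.auth.logout(key)
+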
+
+
+
+ User List
+
+
+
+
+
+
+ User List > Active
+
+
+ The user list shows all active users on your SUSE Manager and displays
+ basic information about each user: username, real name, roles, and date
+ of their last sign in.
+
+
+
+ As shown in , each row in the
+ User List represents a user within your organization.
+ There are four columns of information for each user:
+
+
+
+
+
+ Username — The login name of the user. Clicking on
+ a username displays the User Details page for the
+ user. Refer to for more
+ information.
+
+
+
+
+ Real Name — The full name of the user (last name,
+ first name).
+
+
+
+
+ Roles — List of the user's privileges, such as
+ organization administrator, channel administrator and normal user.
+ Users can have multiple roles.
+
+
+
+
+ Last Sign In — Shows when the user last logged in
+ to SUSE Manager.
+
+
+
+
+
+
+
+
+
+ User List > Deactivated
+
+
+ The list of deactivated users also allows you to reactivate any of them.
+ Click the check box to the left of their name and click the
+ Reactivate button, then the Confirm
+ button. Reactivated users retain the permissions and system group
+ associations they had when they were deactivated. Clicking a user name
+ shows the User Details page.
+
+
+
+
+ User List > All
+
+
+ The All page lists all users that belong to your
+ organization. In addition to the fields listed in the previous two
+ screens, the table of users includes a Status field.
+ This field indicates whether the user is Active or
+ Deactivated.
+ Click a username to see the User
+ Details page.
+
+
+
+ User Details
+ deactivate
+ user
+ deactivate
+ user (SUSE Manager only)
+
+ Clicking a username on a
+ UsersUser
+ List listing displays the User
+ Details page. Here SUSE Manager administrators manage the
+ permissions and activity of all users; they can also delete
+ or deactivate users.
+
+
+ Users can be deactivated directly in the SUSE Manager Web interface.
+ SUSE Manager administrators can deactivate or delete users of their
+ organization. Users can deactivate their own accounts.
+
+
+ Users with SUSE Manager Administrator Role
+
+ Users with the SUSE Manager administrator role cannot be deactivated
+ until that role is removed from their account.
+
+
+
+ Deactivated users cannot log in to the SUSE Manager Web interface or
+ schedule any actions. Actions scheduled by a user prior to their
+ deactivation remain in the action queue. Deactivated users can be
+ reactivated by SUSE Manager administrators.
+
+
+ Irreversible Deletion
+
+ User deletion is irreversible; exercise it with caution. Consider
+ deactivating the user first in order to assess the effect deletion will
+ have on your infrastructure.
+
+
+
+ To deactivate a user:
+
+
+
+
+ Click a username to navigate to the User
+ Details tab.
+
+
+
+
+ Verify that the user is not a SUSE Manager administrator. If they are,
+ uncheck the box to the left of that role and click the
+ Submit button.
+
+
+
+
+ Click the Deactivate User link in the upper right
+ corner of the dialog.
+
+
+
+
+ Click the Deactivate User button
+ to confirm.
+
+
+
+
+ To delete a user:
+
+
+
+
+ Click a username to navigate to the User
+ Details tab.
+
+
+
+
+ Verify that the user is not a SUSE Manager administrator. Uncheck the
+ box to remove the role if necessary.
+
+
+
+
+ Click the Delete User link in the upper right
+ corner of the dialog.
+
+
+
+
+ Click the Delete User button to permanently delete
+ the user.
+
+
+
+
+ For instructions to deactivate your own account, refer to
+ .
+
+
+ User Details > Details
+ changing email address
+ changing password
+ email address
+ changing
+ SUSE Manager administrator
+ user roles
+
+ The Details tab displays the username,
+ first name, last name, email address, roles, and other
+ details about the user. Edit this information as needed and then
+ confirm with Update. When changing a user's
+ password, you will only see asterisks as you type.
+
+
+ To delegate responsibilities within your organization, SUSE Manager
+ provides several roles with varying degrees of access. This list
+ describes the permissions of each role and the differences between
+ them:
+
+
+
+
+ User (normal user) — Also known as a
+ System Group User, this is the standard role
+ associated with any newly created user. This person may be
+ granted access to manage system groups and software channels, if
+ the SUSE Manager administrator sets the roles accordingly. The
+ systems must be in system groups that the user has
+ permission to manage. However, all globally subscribable
+ channels may be used by anyone.
+
+
+
+
+ SUSE Manager Administrator — This role allows a
+ user to perform any function available in SUSE Manager. As the master
+ account for your organization, the person holding this role can alter
+ the privileges of all other accounts of this organization, as well as conduct any of the
+ tasks available to the other roles. Like with other roles, multiple
+ SUSE Manager administrators may exist. Go to
+ AdminUsers
+ and click the check box in the SUSE Manager Admin?
+ row. For more information, see .
+
+
+ A SUSE Manager Administrator can create foreign
+ organizations, but can only create users for an organization
+ if they also hold organization administrator privileges for that
+ organization.
+
+
+
+
+ Organization Administrator — This role provides
+ a user with all the permissions other administrators have, namely the
+ activation key, configuration, channel, and system group
+ administrator. Organization Administrator is
+ not entitled to perform actions that belong to the
+ Admin tab (see ).
+
+
+
+
+ Activation Key Administrator — This role is
+ designed to manage your collection of activation keys. A user
+ assigned to this role can modify and delete any key within your
+ organization.
+
+
+
+
+ Configuration Administrator — This role enables
+ a user to manage the configuration of systems within the
+ organization, using either the SUSE Manager Web interface or the
+ command line tools from the
+ rhncfg-management package.
+
+
+
+
+ Channel Administrator — This role provides a
+ user with full access to all software channels within your
+ organization. This requires the SUSE Manager synchronization tool
+ (mgr-sync from the
+ susemanager-tools package). The channel
+ administrator may change the base channels of systems, make
+ channels globally subscribable, and create entirely new channels.
+
+
+
+
+ System Group Administrator — This role limits
+ authority to systems or system groups to which access is
+ granted. The System Group Administrator can create new system
+ groups, delete any assigned systems from groups, add systems to
+ groups, and manage user access to groups.
+
+
+
+
+ Being a SUSE Manager administrator enables you to remove administrator
+ rights from other users. It is possible to remove your own privileges
+ as long as you are not the only SUSE Manager administrator.
+
+
+ To assign a new role to a user, check the respective box. SUSE Manager
+ administrators are automatically granted administration access to all
+ other roles, signified by grayed-out check boxes. Click
+ Update to submit your changes.
+
+
+
+ User Details > System Groups
+
+ This tab displays a list of system groups the user may administer;
+ for more information about system groups, see . SUSE Manager
+ administrators can set this user's access permissions to each
+ system group. Check or uncheck the box to the left of the system
+ group and click the Update Permissions button to
+ save the changes.
+
+
+ SUSE Manager administrators may select one or more default system
+ groups for a user. When the user registers a system, it gets assigned
+ to the selected group or groups. This allows the user to access the
+ newly-registered system immediately. System groups to which this user
+ has access are preceded by an (*).
+
+
+
+ User Details > Systems
+
+ This tab lists all systems a user can access according to the system
+ groups assigned to the user. To carry out tasks on some of these
+ systems, select the set of systems by checking the boxes to the left
+ and click the Update List button. Use the System Set
+ Manager page to execute actions on those systems. Clicking the name of
+ a system takes you to its System Details page. Refer
+ to for more information.
+
+
+
+ User Details > Channel Permissions
+
+ This tab lists all channels available to your organization. Grant
+ explicit channel subscription permission to a user for each of the
+ channels listed by checking the box to the left of the channel, then
+ click the Update Permissions button. Permissions
+ granted by a SUSE Manager administrator or channel administrator have
+ no check box but a check icon just like globally subscribable channels.
+
+
+ User Details > Channel Permissions > Subscription
+
+ Identifies channels to which the user may subscribe systems. To change
+ these, select or deselect the appropriate check boxes and click the
+ Update Permissions button. Note that channels
+ subscribable due to the user's administrator status or the channel's
+ global settings cannot be altered. They are identified with a check
+ icon.
+
+
+
+ User Details > Channel Permissions > Management
+
+ Identifies channels the user may manage. To change these, select or
+ deselect the appropriate check boxes and click the Update
+ Permissions button. The permission to manage channels does
+ not enable the user to create new channels. Note that channels
+ automatically manageable through the user's admin status cannot be
+ altered. These channels are identified with a check icon. Remember,
+ SUSE Manager administrators and channel administrators can subscribe
+ to or manage any channel.
+
+
+
+
+ User Details > Preferences
+
+ Configure the following settings for the user:
+
+
+
+
+ Email Notifications: Determine whether this user
+ should receive email every time a patch alert is applicable to one or
+ more systems in his or her SUSE Manager account, as well as daily
+ summaries of system events.
+
+
+
+
+
+ SUSE Manager List Page Size: Maximum number of
+ items that appear in a list on a single page. If the list contains
+ more items than can be displayed on one page, click the
+ Next button to see the next page. This preference
+ applies to the user's view of system lists, patch lists, package
+ lists, and so on.
+
+
+
+
+ Overview Start Page: Configure which information
+ is displayed on the Overview page at login.
+
+
+
+
+ CSV Files: Select whether to use the default comma
+ or a semicolon as separator in downloadable CSV files.
+
+
+
+
+ Change these options to fit your needs, then click the Save
+ Preferences button. To change the time zone for this user,
+ click on the Locale subtab and select from the
+ drop-down menu. Dates and times, like system check-in times, will be
+ displayed according to the selected time zone. Click Save
+ Preferences for changes to take effect.
+
+
+
+ User Details > Addresses
+
+ This tab lists mailing addresses associated with the user's account. If
+ there is no address specified yet, click Fill in this
+ address and fill out the form. When finished, click
+ Update. To modify this information, click the
+ Edit this address link, change the relevant
+ information, and click the Update button.
+
+
+
+
+
+
+
+
+ System Group Configuration
+
+ System groups help when different users need to administer different
+ groups of systems within one organization.
+
+
+ System Group Configuration > Configuration
+ Enable Create a user default System
+ Group and confirm with Update.
+
+ Assign such a group to systems via the
+ GroupsJoin
+ subtab of a system's details page. For more information, see or .
+
+
+
+ System Group Configuration > External Authentication
+
+ Allows you to create an external group with the Create External
+ Group link.
+
+
+ Users can join such groups via the System Groups
+ tab of the user details page: check the wanted
+ group and confirm with Update
+ Permissions. For more information, see .
+
+
+
+
+
+ Admin
+
+
+ The Admin page allows SUSE Manager customers to manage
+ the basic configuration, including creating and managing multiple
+ organizations. Only the SUSE Manager administrator can access the
+ Admin page.
+
+
+
+ Admin > Setup Wizard
+
+
+ Setting up SUSE Manager typically requires some extra steps after
+ installation for common configuration tasks.
+
+
+
+ The Setup Wizard link is displayed when the
+ SUSE Manager Web interface is used for the first time and can be accessed
+ later at any time by clicking
+ AdminSetup
+ Wizard. On the three tabs configure the HTTP proxy
+ server, organization credentials, and SUSE products.
+
+
+
+
+ HTTP Proxy:
+
+
+ Configure a proxy server that SUSE Manager will use to access SCC (SUSE Customer Center)
+ and other remote servers here. Use hostname:port syntax in the
+ Hostname field if the proxy port is not 8080. Clearing the fields disables the
+ proxy.
+
+
+
+ Organization Credentials:
+
+
+
+ Click Add a new credential and enter
+ username and password to allow a user to access SCC. After saving, a
+ new credential card will be displayed. Buttons below the credential
+ card allow you to:
+
+
+
+
+ see the credential validation status (green tick or red cross icon).
+ To re-check the credential with SCC, click the icon;
+
+
+
+
+ set the primary credentials for inter-server synchronization (yellow
+ star icon);
+
+
+
+
+ list the subscriptions related to a certain credential (list icon);
+
+
+
+
+ edit the credential (pencil icon);
+
+
+
+
+ delete the credential (trash can icon).
+
+
+
+
+
+
+ SUSE Products:
+
+
+
+ On the SUSE Products page, select
+ product-specific channels you are entitled to. The products
+ displayed are directly linked to your organization credentials as
+ well as your SUSE subscriptions. Product extensions and modules
+ are shown when you select the corresponding base product or click
+ the plus sign to its left. After you have made your selection,
+ click Add products. This is equivalent to running
+ mgr-sync add products or just
+ mgr-sync on the command line.
+
+
+
+ Channel synchronization will start and might take several hours.
+ When finished, the corresponding channels can be used in SUSE Manager.
+
+
+
+ Alternatively, you can add listed channels right away by clicking
+ the Add this product button in the status
+ column. A progress bar will be displayed. The main product will
+ expand, and now you can select add-on products belonging to the
+ product that is currently added. To check for required channels,
+ click the list icon in the Channels
+ column. Once a product is downloaded and ready to use, the state
+ will change to Finished.
+
+
+
+
+
+
+
+
+
+ Admin > Organizations
+
+
+ The organizations feature allows SUSE Manager administrators to create and
+ manage multiple organizations across SUSE Manager. Administrators can
+ allocate software and system entitlements across various organizations,
+ as well as control an organization's access to system management tasks.
+
+
+
+
+
+
+
+ If you click the name of an organization, the Organization Details
+ page appears.
+
+
+
+
+
+
+ Organization Details > Details
+
+ This screen lists the details of an organization.
+
+
+
+
+ Organization Details > Users
+
+ List of all the users of an organization. You can modify the user
+ details if you are logged in to that organization and have organization
+ administrator privileges.
+ For more information, see .
+
+
+
+
+
+ Organization Details > Trust
+
+ Here you can establish trust between organizations. Such a trust
+ allows organizations to share data.
+
+
+
+
+
+ Organization Details > Configuration
+
+ Here you configure the organization to use staged contents
+ (pre-fetching packages, etc.), set up software crash
+ reporting, and enable the upload of SCAP files.
+
+
+
+
+ The clients will download packages in advance and stage them. This
+ has the advantage that the package installation action will take
+ place immediately, when the schedule is actually executed. This
+ pre-fetching saves maintenance window time, which is
+ good for service uptime.
+
+
+ To stage contents (pre-fetching), edit
+ /etc/sysconfig/rhn/up2date on the client:
+
+stagingContent=1
+stagingContentWindow=24
+
+ stagingContentWindow is a time value expressed in
+ hours and determines when downloading will start. It
+ is the number of hours before the scheduled installation or update time.
+ In this case, it means 24 hours before the
+ installation time. The exact download
+ start time depends on the contact method—when the next
+ rhn_check is performed.
+
+
+ Next time an action is scheduled, packages will automatically be
+ downloaded but not installed yet. When the scheduled time comes, the
+ action will use the staged version.
+
+
+
+ Organization Details > States
+
+
+
+
+
+
+
+
+
+
+ Admin > Users
+
+
+ To view and manage all users of the organization you are currently logged
+ in to, click Users in the left navigation bar. The
+ table lists username, real name, organization and whether the user is
+ organization or SUSE Manager administrator. To modify administrator
+ privileges, click the username to get to the user's
+ Details page.
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Admin > SUSE Manager Configuration
+
+
+ This tab is split into subtabs that allow you to configure most aspects
+ of SUSE Manager.
+
+
+
+ Admin > SUSE Manager Configuration > General
+
+ This page allows you to alter the most basic settings.
+
+
+
+
+
+ Administrator Email Address
+
+
+
+ E-mail address of the SUSE Manager administrator.
+
+
+
+
+
+
+ SUSE Manager Hostname
+
+
+
+ Hostname of the SUSE Manager server.
+
+
+
+
+
+ SUSE Manager Proxy Configuration
+
+
+ HTTP proxy, HTTP proxy
+ username, HTTP proxy password, and
+ Confirm HTTP proxy password.
+
+
+ The HTTP proxy settings are for the communication with a SUSE Manager
+ parent server, if there is any. The HTTP proxy should be of the
+ form: hostname:port; the default port
+ 8080 will be used if none is explicitly
+ provided. HTTP proxy settings for client systems to connect to
+ this SUSE Manager can be different, and will be configured
+ separately, for example via .
+
+
+
+
+
+
+ RPM repository mount point
+
+
+
+
+
+
+
+
+ Default To SSL
+
+
+
+ For secure communication, use SSL.
+
+
+
+
+
+
+ When done, confirm with Update.
+
+
+
+
+
+
+
+
+
+
+
+ Admin > SUSE Manager Configuration > Bootstrap Script
+
+ The SUSE Manager
+ ConfigurationBootstrap Script
+ page allows you to generate a bootstrap script that registers the
+ client systems with SUSE Manager and disconnects them
+ from the remote Novell Customer Center. This script is to be
+ placed in the /srv/www/htdocs/pub/bootstrap/
+ directory of SUSE Manager. It significantly reduces the effort involved in
+ reconfiguring all systems, which by default obtain packages from the
+ remote Novell Customer Center. The required fields are pre-populated with values
+ derived from previous installation steps. Ensure this information is
+ accurate.
+
+
+ Check boxes offer options for including built-in security SSL and
+ GNU Privacy Guard (GPG) features, both of which are advised. In
+ addition, you may enable remote command acceptance and remote
+ configuration management of the systems to be bootstrapped to the
+ SUSE Manager. Both features are useful for completing client
+ configuration. Finally, if you are using an HTTP proxy server, fill
+ in the related fields. When finished, click
+ Update.
+
+
+
+
+ Admin > SUSE Manager Configuration > Organizations
+
+ The SUSE Manager
+ ConfigurationOrganizations
+ page contains details about the organizations feature of SUSE Manager,
+ as well as links to quickly get started creating and configuring
+ organizations.
+
+
+
+
+
+ Admin > SUSE Manager Configuration > Restart
+
+ The SUSE Manager
+ ConfigurationRestart page
+ comprises the final step in configuring SUSE Manager. Click the
+ Restart button to restart SUSE Manager and
+ incorporate all of the configuration options added on the previous
+ screens. It will take between four and five minutes for the restart to
+ finish.
+
+
+
+
+ Admin > SUSE Manager Configuration > Cobbler
+
+ On the SUSE Manager
+ ConfigurationCobbler page you
+ can run the Cobbler Sync by clicking Update. Cobbler
+ Sync is used to repair or rebuild the contents of
+ /srv/tftpboot or
+ /srv/www/cobbler when a manual modification of the
+ cobbler setup has occurred.
+
+
+
+
+
+ Admin > SUSE Manager Configuration > Bare-metal systems
+
+ Here you can add unprovisioned ("bare-metal") systems capable of PXE
+ booting to an organization. First click Enable adding to
+ this organization.
+ Those systems will then
+ appear in the Systems list, where regular
+ provisioning via autoinstallation is possible in a completely unattended
+ fashion. Only x86_64 systems with at least 1 GB of RAM are supported.
+ The SUSE Manager server will use its integrated Cobbler instance and will
+ act as a TFTP server for this feature to work, so the network segment that
+ connects it to target systems must be properly configured. In
+ particular, a DHCP server must exist and have a next-server
+ configuration parameter set to the SUSE Manager server IP address or
+ hostname.
+
+
+ Once enabled, any bare-metal system connected to the SUSE Manager server
+ network will be automatically added to the organization when it powers
+ on. The process typically takes a few minutes; when it finishes, the
+ system will automatically shut down and then appear in the
+ Systems list.
+
+
+
+ Note that new systems will be added to the organization of the
+ administrator who enabled this feature. To change the organization,
+ disable the feature, log in as an administrator of a different
+ organization and enable it again.
+
+
+
+ Provisioning can be initiated by clicking the Provisioning tab. In the
+ case of bare-metal systems, though, provisioning cannot be scheduled: it
+ happens automatically as soon as it is completely configured and the
+ system is powered on.
+
+
+ It is possible to use System Set Manager with
+ bare-metal systems, although in that case some features will not be
+ available as those systems do not have an operating system installed.
+ This limitation also applies to mixed sets with regular and bare-metal
+ systems: full features will be enabled again once all bare-metal systems
+ are removed from the set.
+
+
+
+
+
+ Admin > ISS Configuration
+
+
+ Inter-Server Synchronization (ISS) allows a SUSE Manager to synchronize
+ content and permissions from another SUSE Manager instance in a
+ peer-to-peer relationship.
+
+
+
+
+ Configuring the Master SUSE Manager Server
+
+ Click Admin > ISS
+ Configuration > Master Setup. In
+ the top right-hand corner of this page, click Add New
+ Slave and fill in the following information:
+
+
+
+
+ Slave Fully Qualified Domain Name (FQDN)
+
+
+
+
+ Allow Slave to Sync? — Checking this field allows the slave
+ SUSE Manager to access this master SUSE Manager. Otherwise, contact
+ with this slave will be denied.
+
+
+
+
+ Sync All Orgs to Slave? — Checking this field will synchronize all
+ organizations to the slave SUSE Manager.
+
+
+
+
+
+ Choosing the Sync All Orgs to Slave? option on the
+ Master Setup page will override any specifically
+ selected organizations in the local organization table.
+
+
+
+ Click Create. Optionally, click any local
+ organization to be exported to the slave SUSE Manager, then click
+ Allow Orgs.
+
+
+ To enable the inter-server synchronization (ISS) feature, edit the
+ /etc/rhn/rhn.conf file and set:
+ disable_iss=0. Save the file and restart the httpd
+ service with service httpd restart.
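+
+
+ For example, assuming the disable_iss option is already present in
+ /etc/rhn/rhn.conf, the change and restart could be done from a shell as
+ follows:
+
+sed -i 's/^disable_iss.*/disable_iss=0/' /etc/rhn/rhn.conf
+service httpd restart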
+
+
+
+
+ Configuring Slave Servers
+
+ Slave servers receive content synchronized from the master server. To
+ securely transfer content to the slave servers, the ORG-SSL certificate
+ from the master server is needed. Click
+ Admin > ISS
+ Configuration > Slave Setup. In
+ the top right-hand corner, click Add New Master and
+ fill in the following information:
+
+
+
+
+ Master Fully Qualified Domain Name (FQDN)
+
+
+
+
+ Default Master?
+
+
+
+
+ Filename of this Master's CA Certificate: use the full path to the CA
+ certificate (see the example below).
+
+
+
+
+ Click Add New Master.
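+
+
+ The master's ORG-SSL certificate is usually published in its
+ /pub directory, so it can be fetched from the slave like this (the host
+ name and target directory are assumptions based on a default layout):
+
+wget -P /usr/share/rhn/ http://master.example.com/pub/RHN-ORG-TRUSTED-SSL-CERT
+
+ The full path of the downloaded file, for example
+ /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT, is then what goes into the CA
+ certificate field above.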
+
+
+
+ Once the master and slave servers are configured, start the
+ synchronization by executing the mgr-inter-sync
+ command:
+
+mgr-inter-sync -c YOUR-CHANNEL
+
+
+
+
+
+
+
+
+ Mapping SUSE Manager Master Server Organizations to Slave Organizations
+
+ A mapping between organization names on the master and the slave
+ SUSE Manager allows channel access permissions to be set on the master
+ server and propagated when content is synced to a slave SUSE Manager.
+ Not all organization and channel details need to be mapped for all
+ slaves. SUSE Manager administrators can select which permissions and
+ organizations are synchronized by allowing or omitting mappings.
+
+
+ To complete the mapping, log in to the Slave SUSE Manager as
+ administrator. Click Admin > ISS
+ Configuration > Slave Setup and
+ select a master SUSE Manager by clicking its name. Use the drop-down
+ box to map the exported master organization name to a matching local
+ organization in the slave SUSE Manager, then click Update
+ Mapping.
+
+
+ On the command line, issue the synchronization command on each of the
+ custom channels to obtain the correct trust structure and channel
+ permissions:
+
+mgr-inter-sync -c YOUR-CHANNEL
+
+
+
+
+ Admin > Task Schedules
+
+
+ Under Task Schedules all predefined task bunches
+ are listed. Click a Schedule name to open its
+ Basic Schedule Details, where you can disable it
+ or change its frequency.
+
+
+ Click Edit Schedule to update the schedule with
+ your settings. To delete a schedule, click delete
+ schedule in the upper right-hand corner.
+
+
+
+
+ Only disable or delete a schedule if you are absolutely certain this is
+ necessary, as schedules are essential for SUSE Manager to work properly.
+
+
+
+
+ If you click a bunch name, a list of runs of that bunch type and their
+ status will be displayed. Clicking a start time link takes you back
+ to the Basic Schedule Details.
+
+
+
+
+ For example, the following predefined task bunches are scheduled by
+ default and can be configured:
+
+
+
+
+
+ channel-repodata-default:
+
+
+
+ (re)generates repository metadata files.
+
+
+
+
+ cleanup-data-default:
+
+
+
+ cleans up stale package change log and monitoring time series data
+ from the database.
+
+
+
+
+ clear-taskologs-default:
+
+
+
+ clears task engine (taskomatic) history data older than a specified
+ number of days, depending on the job type, from the database.
+
+
+
+
+ cobbler-sync-default:
+
+
+
+ syncs distribution and profile data from SUSE Manager to Cobbler.
+
+
+
+
+
+ compare-configs-default:
+
+
+
+ compares configuration files as stored in configuration channels with
+ the files stored on all configuration-enabled servers. To review
+ comparisons, click the Systems tab and click
+ the system of interest. Go to
+ Configuration > Compare Files.
+ For more information, refer to
+ .
+
+
+
+
+ cve-server-channels-default:
+
+
+
+ updates internal pre-computed CVE data that is used to display results
+ on the CVE Audit page. Search results in the
+ CVE Audit page are current as of the last run of
+ this schedule. For more information, see
+ .
+
+
+
+
+ daily-status-default:
+
+
+
+ sends daily report emails to relevant addresses. See
+ to learn more about how
+ to configure notifications for specific users.
+
+
+
+
+ errata-cache-default:
+
+
+
+ updates internal patch cache database tables, which are used to look
+ up packages that need updates for each server. Also, this sends
+ notification emails to users that might be interested in certain
+ patches. For more information on patches, see
+ .
+
+
+
+
+ errata-queue-default:
+
+
+
+ queues automatic updates (patches) for servers that are configured to
+ receive them.
+
+
+
+
+ kickstart-cleanup-default:
+
+
+
+ cleans up stale kickstart session data.
+
+
+
+
+ kickstartfile-sync-default:
+
+
+
+ generates Cobbler files corresponding to Kickstart profiles created by
+ the configuration wizard.
+
+
+
+
+
+ mgr-register-default:
+
+
+
+ calls the mgr-register command, which synchronizes
+ client registration data with NCC (new, changed or deleted clients'
+ data are forwarded).
+
+
+
+
+ mgr-sync-refresh-default:
+
+
+
+
+ starts synchronization with SUSE Customer Center (SCC) at the default
+ time (mgr-sync-refresh).
+
+
+
+
+ package-cleanup-default:
+
+
+
+ deletes stale package files from the file system.
+
+
+
+
+ reboot-action-cleanup-default:
+
+
+
+ any reboot actions pending for more than six hours are marked as
+ failed and associated data is cleaned up in the database. For more
+ information on scheduling reboot actions, see
+ .
+
+
+
+
+ sandbox-cleanup-default:
+
+
+
+ cleans up sandbox configuration files and channels
+ that are older than the sandbox_lifetime
+ configuration parameter (3 days by default). Sandbox files are those
+ imported from systems or files under development. For more
+ information, see
+
+
+
+
+
+ session-cleanup-default:
+
+
+
+ cleans up stale Web interface sessions, typically data that is
+ temporarily stored when a user logs in and then closes the browser
+ before logging out.
+
+
+
+
+ ssh-push-default:
+
+
+
+ prompts clients to check in with SUSE Manager via SSH if they are
+ configured with an SSH Push contact method.
+
+
+
+
+
+
+
+
+
+ Admin > Task Engine Status
+
+
+ This is a status report of the various tasks run by the SUSE Manager
+ task engine. Next to each task name you find the date and time of the
+ last execution and its status.
+
+
+
+
+ Admin > Show Tomcat Logs
+
+
+ Here the SUSE Manager Admin user has access to the Tomcat log file
+ (/var/log/rhn/rhn_web_ui.log). This way, no
+ root privileges are required.
+
+
+
+
+
+
+
+
+ Help
+
+ Help Desk
+ WebLogic
+ Help
+
+ The Help pages provide access to the full suite of
+ documentation and support available to SUSE Manager users.
+
+
+ SUSE Manager Getting Started Guide
+
+
+ In ↑“Getting Started” you find information
+ about the
+ SUSE Manager server and its installation and initial configuration.
+ Implementing a fully functional SUSE Manager requires more than
+ installing software and a database. Client systems must be configured to
+ use SUSE Manager. Custom packages and channels should be created for
+ optimal use. Since these tasks extend beyond the basic installation, they
+ are covered in detail in the other guides.
+
+
+
+ SUSE Manager Reference Guide
+
+
+
+ This guide explains the Web interface and its features in detail.
+
+
+
+
+ SUSE Manager Best Practices Guide
+
+
+ ↑“Best Practices” describes SUSE's
+ recommended best practices for SUSE Manager. This information has been
+ collected from a large number of successful SUSE Manager real world
+ implementations and includes feedback provided by product management,
+ sales, and engineering.
+
+
+
+
+
+ SUSE Manager Advanced Topics Guide
+
+
+ ↑“Advanced Topics” contains a collection of
+ advanced topics not covered under the best practices guide.
+
+
+
+
+
+
+ Release Notes
+
+
+ The Release Notes page lists the notes accompanying
+ every recent release of SUSE Manager. All significant changes occurring
+ in a given release cycle, from major enhancements to the user interface
+ to changes in the related documentation, are documented here.
+
+
+
+ API
+
+ This page documents the Application Programming
+ Interface (API) for creating tools and programs that automate common
+ tasks via SUSE Manager.
+
+
+ The API page contains an overview of the API, with
+ links to detailed descriptions of various API calls available to
+ administrators and developers. There is also an FAQ
+ page for answers to common questions about the SUSE Manager API.
+ A Sample Scripts page shows example code using API calls.
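+
+
+ As a quick illustration (the host name is a placeholder), the XML-RPC
+ endpoint can be queried for its API version without authentication:
+
+curl -sk https://susemanager.example.com/rpc/api -H 'Content-Type: text/xml' \
+  -d '<methodCall><methodName>api.getVersion</methodName><params/></methodCall>'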
+
+
+
+
+ Search
+
+
+ The Documentation Search page features a robust search
+ engine that indexes and searches SUSE Manager documentation.
+
+
+
+
+
+ Users can search the available online documentation and filter the
+ results according to the following choices in the What to
+ Search drop-down menu:
+
+
+
+
+
+ Content & Title — Search both the title
+ headings and body content of all available documents.
+
+
+
+
+ Free Form — Search documents and indices for any
+ keyword matches, which broadens search results.
+
+
+
+
+ Content — Search only the body content of
+ documentation for more specific matches.
+
+
+
+
+ Title — Search only the title headings of the
+ documentation for targeted, specific search results.
+
+
+
+
+
+ The Free Form field additionally allows you to search
+ using field names that you prepend to search queries to filter results
+ by that field.
+
+
+
+ For example, if you wanted to search all of the SUSE Manager manuals for
+ the word Virtualization in the title and
+ install in the content, type the following in the
+ Free Form field:
+
+
+title:Virtualization and content:install
+
+
+ Other supported field names for documentation search include:
+
+
+
+
+
+ url — Search the URL for a particular keyword.
+
+
+
+
+ title — Search titles for a particular keyword.
+
+
+
+
+ content — Search the body of the documentation for
+ a particular keyword.
+
+
+
+
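+ For instance, following the same field:term syntax as above, a query
+ restricted to page URLs only (the keyword is arbitrary) could be:
+
+url:cobbler
+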
+
+ If there are several pages of search results, you can limit the number of
+ visible results shown on one page by clicking the Display
+ quantity items per page drop-down menu, which offers between 10
+ and 500 results per page.
+
+
+ To move between pages, click the right or left angle brackets
+ (> to go forward or < to
+ go backward).
+
+
+
+
+
diff --git a/geekodoc/tests/run-tests.sh b/geekodoc/tests/run-tests.sh
new file mode 100755
index 0000000..3984389
--- /dev/null
+++ b/geekodoc/tests/run-tests.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+#
+# Run test cases
+#
+# Author: Thomas Schraitle
+# Date: December 2016
+
+VALIDATOR="xmllint"
+PROG=${0##*/}
+PROGDIR=${0%/*}
+# Needed to be able to run it from different directories
+SCHEMA=${PROGDIR}/../rng/geekodoc5-flat.rng
+SCHEMA=$(readlink -f ${SCHEMA})
+ERRORS=0
+
+function validate_with_jing {
+ local _RNG=$1
+ local _XML=$2
+ local _ERROR=${2%*.xml}.err
+ jing $_RNG $_XML >$_ERROR
+ echo $?
+}
+
+function validate_with_xmllint {
+ local _RNG=$1
+ local _XML=$2
+ local _ERROR=${2%*.xml}.err
+ xmllint --noout --relaxng $_RNG $_XML 2>$_ERROR
+ echo $?
+}
+
+function validator {
+ case "$VALIDATOR" in
+ "xmllint")
+ validate_with_xmllint "$1" "$2"
+ ;;
+ "jing")
+ validate_with_jing "$1" "$2"
+ ;;
+ *)
+ echo "Wrong validator: $VALIDATOR" 1>&2
+ ;;
+ esac
+}
+
+function print_help {
+    cat <<EOF 1>&2
+Usage: ${PROG} [-V xmllint|jing]
+EOF
+}
+
+# Parse command line options; the --validator flag name is assumed here
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    -V|--validator)
+      if [[ "$2" != xmllint && "$2" != jing ]]; then
+        print_help
+        exit 10
+      fi
+      VALIDATOR="$2"
+      shift 2
+      ;;
+    --)
+      shift
+      break
+      ;;
+    *) break ;;
+  esac
+done
+
+echo "Using validator '$VALIDATOR'..."
+
+# Cleanup any *.err files first...
+rm -f $PROGDIR/*.err 2>/dev/null
+
+# Iterating over all XML files inside this directory...
+for xmlfile in $PROGDIR/*.xml; do
+ result=$(validator $SCHEMA $xmlfile )
+ if [[ $result = '0' ]]; then
+ RESULTSTR="\e[1;32mPASSED\e[0m"
+ else
+ RESULTSTR="\e[1;31mFAILED\e[0m"
+ ERRORS=$((ERRORS + 1))
+ fi
+ echo -e "Validating '$xmlfile'... $RESULTSTR"
+ if [[ $result != '0' ]]; then
+ cat "${xmlfile%*.xml}.err" 1>&2
+ echo "----------------------------------------------"
+ fi
+done
+
+echo
+if [[ $ERRORS -eq 0 ]]; then
+ echo -e "Found\e[1;32m $ERRORS errors\e[0m. Congratulations! :-)"
+else
+ echo -e "Found\e[1;31m $ERRORS error(s)\e[0m. :-("
+ exit 1
+fi
+
+# Remove any error files which are zero bytes, but keep the ones which
+# contains error messages
+for errfile in $PROGDIR/*.err; do
+ [[ -s $errfile ]] || rm $errfile 2>/dev/null
+done
+
+exit 0
diff --git a/geekodoc/xsl/sch-fix.xsl b/geekodoc/xsl/sch-fix.xsl
new file mode 100644
index 0000000..3aa721f
--- /dev/null
+++ b/geekodoc/xsl/sch-fix.xsl
@@ -0,0 +1,41 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/novdoc/tests/article-admons.xml b/novdoc/tests/article-admons.xml
new file mode 100644
index 0000000..1e550f8
--- /dev/null
+++ b/novdoc/tests/article-admons.xml
@@ -0,0 +1,21 @@
+
+
+
+ Test Article for Admonitions
+
+ Note Title
+ bla
+
+
+ Important Title
+ bla
+
+
+ Tip Title
+ bla
+
+
+ Warning Title
+ bla
+
+
diff --git a/novdoc/tests/article-base.xml b/novdoc/tests/article-base.xml
new file mode 100644
index 0000000..99c1bba
--- /dev/null
+++ b/novdoc/tests/article-base.xml
@@ -0,0 +1,6 @@
+
+
+
+ Article Test
+ The quick brown fox jumps over the lazy dog.
+
diff --git a/novdoc/tests/novdocx.dtd b/novdoc/tests/novdocx.dtd
new file mode 120000
index 0000000..e70e520
--- /dev/null
+++ b/novdoc/tests/novdocx.dtd
@@ -0,0 +1 @@
+../dtd/novdocx.dtd
\ No newline at end of file
diff --git a/novdoc/tests/run-tests.sh b/novdoc/tests/run-tests.sh
new file mode 100755
index 0000000..a57af44
--- /dev/null
+++ b/novdoc/tests/run-tests.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+#
+# Run test cases
+#
+# Author: Thomas Schraitle
+# Date: December 2016
+
+VALIDATOR="xmllint"
+PROG=${0##*/}
+PROGDIR=${0%/*}
+# Needed to be able to run it from different directories
+DTD=${PROGDIR}/../dtd/novdocx.dtd
+DTD=$(readlink -f ${DTD})
+ERRORS=0
+
+
+function validate_with_xmllint {
+  # Validate against the DTD given as first argument (no network access)
+  local _DTD=$1
+  local _XML=$2
+  local _ERROR=${2%*.xml}.err
+  xmllint --noout --nonet --dtdvalid $_DTD $_XML 2>$_ERROR
+  echo $?
+}
+
+function validator {
+ validate_with_xmllint "$1" "$2"
+}
+
+function print_help {
+    cat <<EOF 1>&2
+Usage: ${PROG} [-V xmllint]
+EOF
+}
+
+# Parse command line options; the --validator flag name is assumed here
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    -V|--validator)
+      if [[ "$2" != xmllint ]]; then
+        print_help
+        exit 10
+      fi
+      VALIDATOR="$2"
+      shift 2
+      ;;
+    --)
+      shift
+      break
+      ;;
+    *) break ;;
+  esac
+done
+
+echo "Using validator '$VALIDATOR'..."
+
+# Cleanup any *.err files first...
+rm -f $PROGDIR/*.err 2>/dev/null
+
+# Iterating over all XML files inside this directory...
+for xmlfile in $PROGDIR/*.xml; do
+ result=$(validator $DTD $xmlfile )
+ if [[ $result = '0' ]]; then
+ RESULTSTR="\e[1;32mPASSED\e[0m"
+ else
+ RESULTSTR="\e[1;31mFAILED\e[0m"
+ ERRORS=$((ERRORS + 1))
+ fi
+ echo -e "Validating '$xmlfile'... $RESULTSTR"
+ if [[ $result != '0' ]]; then
+ cat "${xmlfile%*.xml}.err" 1>&2
+ echo "----------------------------------------------"
+ fi
+done
+
+echo
+if [[ $ERRORS -eq 0 ]]; then
+ echo -e "Found\e[1;32m $ERRORS errors\e[0m. Congratulations! :-)"
+else
+ echo -e "Found\e[1;31m $ERRORS error(s)\e[0m. :-("
+ exit 1
+fi
+
+# Remove any error files which are zero bytes, but keep the ones which contains
+# probable error messages
+for errfile in $PROGDIR/*.err; do
+ [[ -s $errfile ]] || rm $errfile 2>/dev/null
+done
+
+exit 0
\ No newline at end of file