diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index c5eebf77508c56be2e10d08d42ce570b73f2f6f3..dec37dc1b58eb819cc523e48dd6b13f69e1bb12a 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -456,12 +456,16 @@ def VerifyCluster(opts, args):
   simulate = opts.simulate_errors
   skip_checks = []
 
-  # Verify cluster config.
-  op = opcodes.OpClusterVerifyConfig(verbose=opts.verbose,
-                                     error_codes=opts.error_codes,
-                                     debug_simulate_errors=simulate)
+  if opts.nodegroup is None:
+    # Verify cluster config.
+    op = opcodes.OpClusterVerifyConfig(verbose=opts.verbose,
+                                       error_codes=opts.error_codes,
+                                       debug_simulate_errors=simulate)
 
-  success, all_groups = SubmitOpCode(op, opts=opts)
+    success, all_groups = SubmitOpCode(op, opts=opts)
+  else:
+    success = True
+    all_groups = [opts.nodegroup]
 
   if opts.skip_nplusone_mem:
     skip_checks.append(constants.VERIFY_NPLUSONE_MEM)
@@ -1259,7 +1263,7 @@ commands = {
   'verify': (
     VerifyCluster, ARGS_NONE,
     [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
-     DRY_RUN_OPT, PRIORITY_OPT],
+     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
     "", "Does a check on the cluster configuration"),
   'verify-disks': (
     VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
diff --git a/man/gnt-cluster.rst b/man/gnt-cluster.rst
index 866fba64c7a1a061d243e1c77b05f108cf5ccf45..d3fcecd15c5aa0f5a894d859a2e7f1066c915686 100644
--- a/man/gnt-cluster.rst
+++ b/man/gnt-cluster.rst
@@ -579,7 +579,7 @@ node will be listed as /nodes/*name*, and an instance as
 VERIFY
 ~~~~~~
 
-**verify** [--no-nplus1-mem]
+**verify** [--no-nplus1-mem] [--node-group *nodegroup*]
 
 Verify correctness of cluster configuration. This is safe with
 respect to running instances, and incurs no downtime of the
@@ -589,6 +589,11 @@ If the ``--no-nplus1-mem`` option is given, Ganeti won't check
 whether if it loses a node it can restart all the instances on
 their secondaries (and report an error otherwise).
 
+With ``--node-group``, restrict the verification to those nodes and
+instances that live in the named group. This will not verify global
+settings, but it allows verification of a group to be performed while
+other operations are ongoing in other groups.
+
 VERIFY-DISKS
 ~~~~~~~~~~~~