Skip to content

Commit 8c8a5a7

Browse files
committed
Remove now-unnecessary string literal concatenation.
black puts two string literals on the same line when they fit, and its style (for exceptions especially) often means they now do even if they didn't before. So we can now remove these concatenations in favor of single longer literals.
1 parent deb4e99 commit 8c8a5a7

12 files changed

Lines changed: 23 additions & 25 deletions

python/lsst/pipe/base/argumentParser.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -476,13 +476,13 @@ def __init__(self, name, usage="%(prog)s input [options]", **kwargs):
476476
self.add_argument(
477477
"--output",
478478
dest="rawOutput",
479-
help="path to output data repository (need not exist), " f"relative to ${DEFAULT_OUTPUT_NAME}",
479+
help=f"path to output data repository (need not exist), relative to ${DEFAULT_OUTPUT_NAME}",
480480
)
481481
self.add_argument(
482482
"--rerun",
483483
dest="rawRerun",
484484
metavar="[INPUT:]OUTPUT",
485-
help="rerun name: sets OUTPUT to ROOT/rerun/OUTPUT; " "optionally sets ROOT to ROOT/rerun/INPUT",
485+
help="rerun name: sets OUTPUT to ROOT/rerun/OUTPUT; optionally sets ROOT to ROOT/rerun/INPUT",
486486
)
487487
self.add_argument(
488488
"-c",

python/lsst/pipe/base/config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ def __new__(cls, name, bases, dct, **kwargs):
111111
configConnectionsNamespace = {}
112112
for fieldName, obj in connectionsClass.allConnections.items():
113113
configConnectionsNamespace[fieldName] = pexConfig.Field(
114-
dtype=str, doc=f"name for " f"connection {fieldName}", default=obj.name
114+
dtype=str, doc=f"name for connection {fieldName}", default=obj.name
115115
)
116116
# If there are default templates also add them as fields to
117117
# configure the template values
@@ -130,7 +130,7 @@ def __new__(cls, name, bases, dct, **kwargs):
130130
# add it to the Config class that is currently being declared
131131
dct["connections"] = pexConfig.ConfigField(
132132
dtype=Connections,
133-
doc="Configurations describing the" " connections of the PipelineTask to datatypes",
133+
doc="Configurations describing the connections of the PipelineTask to datatypes",
134134
)
135135
dct["ConnectionsConfigClass"] = Connections
136136
dct["ConnectionsClass"] = connectionsClass

python/lsst/pipe/base/connectionTypes.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ class DimensionedConnection(BaseConnection):
153153
def __post_init__(self):
154154
if isinstance(self.dimensions, str):
155155
raise TypeError(
156-
"Dimensions must be iterable of dimensions, got str," "possibly omitted trailing comma"
156+
"Dimensions must be iterable of dimensions, got str, possibly omitted trailing comma"
157157
)
158158
if not isinstance(self.dimensions, typing.Iterable):
159159
raise TypeError("Dimensions must be iterable of dimensions")

python/lsst/pipe/base/connections.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -407,7 +407,7 @@ def __init__(self, *, config: "PipelineTaskConfig" = None):
407407

408408
if config is None or not isinstance(config, PipelineTaskConfig):
409409
raise ValueError(
410-
"PipelineTaskConnections must be instantiated with" " a PipelineTaskConfig instance"
410+
"PipelineTaskConnections must be instantiated with a PipelineTaskConfig instance"
411411
)
412412
self.config = config
413413
# Extract the template names that were defined in the config instance
@@ -496,7 +496,7 @@ def buildDatasetRefs(
496496
# to handle, throw
497497
else:
498498
raise ValueError(
499-
f"Attribute with name {attributeName} has no counterpoint " "in input quantum"
499+
f"Attribute with name {attributeName} has no counterpoint in input quantum"
500500
)
501501
return inputDatasetRefs, outputDatasetRefs
502502

python/lsst/pipe/base/graph/_implDetails.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -79,9 +79,7 @@ def addProducer(self, key: _T, value: _U):
7979
Raised if key is already declared to be produced by another value
8080
"""
8181
if (existing := self._producers.get(key)) is not None and existing != value:
82-
raise ValueError(
83-
f"Only one node is allowed to produce {key}, " f"the current producer is {existing}"
84-
)
82+
raise ValueError(f"Only one node is allowed to produce {key}, the current producer is {existing}")
8583
self._producers[key] = value
8684
if self._createInverse:
8785
self._itemsDict[value].add(key)

python/lsst/pipe/base/graph/_loadHelpers.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ def load(
241241
remainder = nodes - self.headerInfo.map.keys()
242242
if remainder:
243243
raise ValueError(
244-
f"Nodes {remainder} were requested, but could not be found in the input " "graph"
244+
f"Nodes {remainder} were requested, but could not be found in the input graph"
245245
)
246246
_readBytes = self._readBytes
247247
return self.deserializer.constructGraph(nodes, _readBytes, universe)

python/lsst/pipe/base/graphBuilder.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -613,7 +613,7 @@ def connectDataIds(
613613
queryArgs["collections"] = collections
614614
else:
615615
raise ValueError(
616-
f"Unable to handle type {datasetQueryConstraint} given as " "datasetQueryConstraint."
616+
f"Unable to handle type {datasetQueryConstraint} given as datasetQueryConstraint."
617617
)
618618

619619
with registry.queryDataIds(**queryArgs).materialize() as commonDataIds:

python/lsst/pipe/base/pipeTools.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def isPipelineOrdered(pipeline, taskFactory=None):
9797
for attr in iterConnections(taskDef.connections, "outputs"):
9898
if attr.name in producerIndex:
9999
raise DuplicateOutputError(
100-
"DatasetType `{}' appears more than " "once as output".format(attr.name)
100+
"DatasetType `{}' appears more than once as output".format(attr.name)
101101
)
102102
producerIndex[attr.name] = idx
103103

@@ -152,7 +152,7 @@ def orderPipeline(pipeline):
152152
for dsTypeDescr in dsMap.values():
153153
if dsTypeDescr.name in allOutputs:
154154
raise DuplicateOutputError(
155-
"DatasetType `{}' appears more than " "once as output".format(dsTypeDescr.name)
155+
"DatasetType `{}' appears more than once as output".format(dsTypeDescr.name)
156156
)
157157
outputs[idx] = set(dsTypeDescr.name for dsTypeDescr in dsMap.values())
158158
allOutputs.update(outputs[idx])

python/lsst/pipe/base/pipeline.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ class LabelSpecifier:
9696
def __post_init__(self):
9797
if self.labels is not None and (self.begin or self.end):
9898
raise ValueError(
99-
"This struct can only be initialized with a labels set or " "a begin (and/or) end specifier"
99+
"This struct can only be initialized with a labels set or a begin (and/or) end specifier"
100100
)
101101

102102

@@ -340,7 +340,7 @@ def subsetFromLabels(self, labelSpecifier: LabelSpecifier) -> Pipeline:
340340
if labelSpecifier.end is not None:
341341
if labelSpecifier.end not in labels:
342342
raise ValueError(
343-
f"End of range subset, {labelSpecifier.end}, not found in pipeline " "definition"
343+
f"End of range subset, {labelSpecifier.end}, not found in pipeline definition"
344344
)
345345

346346
labelSet = set()
@@ -381,7 +381,7 @@ def _parse_file_specifier(uri: Union[str, ButlerURI]) -> Tuple[ButlerURI, Option
381381
if "," in label_subset:
382382
if ".." in label_subset:
383383
raise ValueError(
384-
"Can only specify a list of labels or a range" "when loading a Pipline not both"
384+
"Can only specify a list of labels or a range when loading a Pipeline, not both"
385385
)
386386
args = {"labels": set(label_subset.split(","))}
387387
# labels supplied as a range
@@ -634,7 +634,7 @@ def _toExpandedPipelineImpl(self, checkContracts=True) -> Iterable[TaskDef]:
634634
if not success:
635635
extra_info = f": {contract.msg}" if contract.msg is not None else ""
636636
raise pipelineIR.ContractError(
637-
f"Contract(s) '{contract.contract}' were not " f"satisfied{extra_info}"
637+
f"Contract(s) '{contract.contract}' were not satisfied{extra_info}"
638638
)
639639

640640
taskDefs = sorted(taskDefs, key=lambda x: x.label)

python/lsst/pipe/base/pipelineIR.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ def construct_mapping(self, node, deep=False):
5959
duplicates = {k for k, i in all_keys.items() if i != 1}
6060
if duplicates:
6161
raise KeyError(
62-
"Pipeline files must not have duplicated keys, " f"{duplicates} appeared multiple times"
62+
f"Pipeline files must not have duplicated keys, {duplicates} appeared multiple times"
6363
)
6464
return mapping
6565

@@ -143,7 +143,7 @@ def from_primitives(label: str, value: Union[List[str], dict]) -> LabeledSubset:
143143
subset = value.pop("subset", None)
144144
if subset is None:
145145
raise ValueError(
146-
"If a labeled subset is specified as a mapping, it must contain the key " "'subset'"
146+
"If a labeled subset is specified as a mapping, it must contain the key 'subset'"
147147
)
148148
description = value.pop("description", None)
149149
elif isinstance(value, abcIterable):
@@ -427,7 +427,7 @@ def toPipelineIR(self) -> "PipelineIR":
427427
"""
428428
if self.include and self.exclude:
429429
raise ValueError(
430-
"Both an include and an exclude list cant be specified" " when declaring a pipeline import"
430+
"Both an include and an exclude list cant be specified when declaring a pipeline import"
431431
)
432432
tmp_pipeline = PipelineIR.from_uri(os.path.expandvars(self.location))
433433
if self.instrument is not KeepInstrument:
@@ -675,7 +675,7 @@ def process_args(argument: Union[str, dict]) -> dict:
675675
# from this pipeline
676676
if accumulate_labeled_subsets.keys() & self.tasks.keys():
677677
raise ValueError(
678-
"Labeled subset names must be unique amongst imports in both labels and " " named Subsets"
678+
"Labeled subset names must be unique amongst imports in both labels and named Subsets"
679679
)
680680
# merge in the named subsets for self so this document can override any
681681
# that have been declared

0 commit comments

Comments
 (0)