Skip to content

Commit f6b3dd2

Browse files
committed
Add support for OWL micro, mini and full reasoners
Add LanguageLevel so that one can configure the level of OWL supported. Remove unused getSpec() in AbstractOntologyService.
1 parent b796fc0 commit f6b3dd2

5 files changed

Lines changed: 111 additions & 26 deletions

File tree

src/ubic/basecode/ontology/jena/AbstractOntologyMemoryBackedService.java

Lines changed: 41 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,13 @@
1515
package ubic.basecode.ontology.jena;
1616

1717
import com.hp.hpl.jena.ontology.OntModelSpec;
18+
import com.hp.hpl.jena.ontology.ProfileRegistry;
19+
import com.hp.hpl.jena.rdf.model.ModelFactory;
20+
import com.hp.hpl.jena.reasoner.ReasonerFactory;
21+
import com.hp.hpl.jena.reasoner.rulesys.OWLFBRuleReasonerFactory;
22+
import com.hp.hpl.jena.reasoner.rulesys.OWLMicroReasonerFactory;
23+
import com.hp.hpl.jena.reasoner.rulesys.OWLMiniReasonerFactory;
24+
import com.hp.hpl.jena.reasoner.transitiveReasoner.TransitiveReasonerFactory;
1825
import ubic.basecode.ontology.model.OntologyModel;
1926
import ubic.basecode.util.Configuration;
2027

@@ -35,23 +42,50 @@ protected String getOntologyUrl() {
3542
}
3643

3744
@Override
38-
protected OntologyModel loadModel( boolean processImports, InferenceMode inferenceMode ) throws IOException {
39-
return new OntologyModelImpl( OntologyLoader.loadMemoryModel( this.getOntologyUrl(), this.getCacheName(), processImports, this.getSpec( inferenceMode ) ) );
45+
protected OntologyModel loadModel( boolean processImports, LanguageLevel languageLevel, InferenceMode inferenceMode ) throws IOException {
46+
return new OntologyModelImpl( OntologyLoader.loadMemoryModel( this.getOntologyUrl(), this.getCacheName(), processImports, this.getSpec( languageLevel, inferenceMode ) ) );
4047
}
4148

4249
@Override
43-
protected OntologyModel loadModelFromStream( InputStream is, boolean processImports, InferenceMode inferenceMode ) throws IOException {
44-
return new OntologyModelImpl( OntologyLoader.loadMemoryModel( is, this.getOntologyUrl(), processImports, this.getSpec( inferenceMode ) ) );
50+
protected OntologyModel loadModelFromStream( InputStream is, boolean processImports, LanguageLevel languageLevel, InferenceMode inferenceMode ) throws IOException {
51+
return new OntologyModelImpl( OntologyLoader.loadMemoryModel( is, this.getOntologyUrl(), processImports, this.getSpec( languageLevel, inferenceMode ) ) );
4552
}
4653

47-
private OntModelSpec getSpec( InferenceMode inferenceMode ) {
54+
private OntModelSpec getSpec( LanguageLevel languageLevel, InferenceMode inferenceMode ) {
55+
String profile;
56+
switch ( languageLevel ) {
57+
case FULL:
58+
profile = ProfileRegistry.OWL_LANG;
59+
break;
60+
case DL:
61+
profile = ProfileRegistry.OWL_DL_LANG;
62+
break;
63+
case LITE:
64+
profile = ProfileRegistry.OWL_LITE_LANG;
65+
break;
66+
default:
67+
throw new UnsupportedOperationException( String.format( "Unsupported OWL language level %s.", languageLevel ) );
68+
}
69+
ReasonerFactory reasonerFactory;
4870
switch ( inferenceMode ) {
71+
case FULL:
72+
reasonerFactory = OWLFBRuleReasonerFactory.theInstance();
73+
break;
74+
case MINI:
75+
reasonerFactory = OWLMiniReasonerFactory.theInstance();
76+
break;
77+
case MICRO:
78+
reasonerFactory = OWLMicroReasonerFactory.theInstance();
79+
break;
4980
case TRANSITIVE:
50-
return OntModelSpec.OWL_MEM_TRANS_INF;
81+
reasonerFactory = TransitiveReasonerFactory.theInstance();
82+
break;
5183
case NONE:
52-
return OntModelSpec.OWL_MEM;
84+
reasonerFactory = null;
85+
break;
5386
default:
5487
throw new UnsupportedOperationException( String.format( "Unsupported inference level %s.", inferenceMode ) );
5588
}
89+
return new OntModelSpec( ModelFactory.createMemModelMaker(), null, reasonerFactory, profile );
5690
}
5791
}

src/ubic/basecode/ontology/jena/AbstractOntologyService.java

Lines changed: 24 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@ public abstract class AbstractOntologyService implements OntologyService {
7474
}
7575

7676
/* settings (applicable for next initialization) */
77+
private LanguageLevel nextLanguageLevel = LanguageLevel.FULL;
7778
private InferenceMode nextInferenceMode = InferenceMode.TRANSITIVE;
7879
private boolean nextProcessImports = true;
7980
private boolean nextSearchEnabled = true;
@@ -91,12 +92,30 @@ public abstract class AbstractOntologyService implements OntologyService {
9192
private Set<Restriction> additionalRestrictions;
9293
private boolean isInitialized = false;
9394
@Nullable
95+
private LanguageLevel languageLevel = null;
96+
@Nullable
9497
private InferenceMode inferenceMode = null;
9598
@Nullable
9699
private Boolean processImports = null;
97100
@Nullable
98101
private Boolean searchEnabled = null;
99102

103+
@Override
104+
public LanguageLevel getLanguageLevel() {
105+
Lock lock = rwLock.readLock();
106+
try {
107+
lock.lock();
108+
return this.languageLevel != null ? this.languageLevel : nextLanguageLevel;
109+
} finally {
110+
lock.unlock();
111+
}
112+
}
113+
114+
@Override
115+
public void setLanguageLevel( LanguageLevel languageLevel ) {
116+
this.nextLanguageLevel = languageLevel;
117+
}
118+
100119
@Override
101120
public InferenceMode getInferenceMode() {
102121
Lock lock = rwLock.readLock();
@@ -162,6 +181,7 @@ private void initialize( @Nullable InputStream stream, boolean forceLoad, boolea
162181
String ontologyUrl = getOntologyUrl();
163182
String ontologyName = getOntologyName();
164183
String cacheName = getCacheName();
184+
LanguageLevel languageLevel = nextLanguageLevel;
165185
InferenceMode inferenceMode = nextInferenceMode;
166186
boolean processImports = nextProcessImports;
167187
boolean searchEnabled = nextSearchEnabled;
@@ -196,7 +216,7 @@ private void initialize( @Nullable InputStream stream, boolean forceLoad, boolea
196216
return;
197217

198218
try {
199-
OntologyModel m = stream != null ? loadModelFromStream( stream, processImports, inferenceMode ) : loadModel( processImports, inferenceMode ); // can take a while.
219+
OntologyModel m = stream != null ? loadModelFromStream( stream, processImports, languageLevel, inferenceMode ) : loadModel( processImports, languageLevel, inferenceMode ); // can take a while.
200220
if ( m instanceof OntologyModelImpl ) {
201221
model = ( ( OntologyModelImpl ) m ).getOntModel();
202222
} else {
@@ -254,6 +274,7 @@ private void initialize( @Nullable InputStream stream, boolean forceLoad, boolea
254274
this.additionalRestrictions = additionalRestrictions;
255275
this.index = index;
256276
this.isInitialized = true;
277+
this.languageLevel = languageLevel;
257278
this.inferenceMode = inferenceMode;
258279
this.processImports = processImports;
259280
this.searchEnabled = searchEnabled;
@@ -615,13 +636,13 @@ public void waitForInitializationThread() throws InterruptedException {
615636
* Delegates the call as to load the model into memory or leave it on disk. Simply delegates to either
616637
* OntologyLoader.loadMemoryModel( url ); OR OntologyLoader.loadPersistentModel( url, spec );
617638
*/
618-
protected abstract OntologyModel loadModel( boolean processImports, InferenceMode inferenceMode ) throws IOException;
639+
protected abstract OntologyModel loadModel( boolean processImports, LanguageLevel languageLevel, InferenceMode inferenceMode ) throws IOException;
619640

620641

621642
/**
622643
* Load a model from a given input stream.
623644
*/
624-
protected abstract OntologyModel loadModelFromStream( InputStream stream, boolean processImports, InferenceMode inferenceMode ) throws IOException;
645+
protected abstract OntologyModel loadModelFromStream( InputStream stream, boolean processImports, LanguageLevel languageLevel, InferenceMode inferenceMode ) throws IOException;
625646

626647
/**
627648
* A name for caching this ontology, or null to disable caching.
@@ -633,17 +654,6 @@ protected String getCacheName() {
633654
return getOntologyName();
634655
}
635656

636-
private OntModelSpec getSpec( InferenceMode inferenceMode ) {
637-
switch ( inferenceMode ) {
638-
case TRANSITIVE:
639-
return OntModelSpec.OWL_MEM_TRANS_INF;
640-
case NONE:
641-
return OntModelSpec.OWL_MEM;
642-
default:
643-
throw new UnsupportedOperationException( String.format( "Unsupported inference level %s.", inferenceMode ) );
644-
}
645-
}
646-
647657
@Override
648658
public void index( boolean force ) {
649659
String cacheName = getCacheName();

src/ubic/basecode/ontology/providers/MedicOntologyService.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,12 +52,12 @@ protected String getOntologyUrl() {
5252
}
5353

5454
@Override
55-
protected OntologyModel loadModel( boolean processImports, InferenceMode inferenceMode ) {
55+
protected OntologyModel loadModel( boolean processImports, LanguageLevel languageLevel, InferenceMode inferenceMode ) {
5656
try ( InputStream is = this.getClass().getResourceAsStream( MEDIC_ONTOLOGY_FILE ) ) {
5757
if ( is == null ) {
5858
throw new RuntimeException( String.format( "The MEDIC ontology was not found in classpath at %s.", MEDIC_ONTOLOGY_FILE ) );
5959
}
60-
return loadModelFromStream( new GZIPInputStream( is ), processImports, inferenceMode );
60+
return loadModelFromStream( new GZIPInputStream( is ), processImports, languageLevel, inferenceMode );
6161
} catch ( IOException e ) {
6262
throw new RuntimeException( e );
6363
}

src/ubic/basecode/ontology/providers/NIFSTDOntologyService.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,12 +43,12 @@ protected String getOntologyUrl() {
4343
}
4444

4545
@Override
46-
protected OntologyModel loadModel( boolean processImports, InferenceMode inferenceMode ) {
46+
protected OntologyModel loadModel( boolean processImports, LanguageLevel languageLevel, InferenceMode inferenceMode ) {
4747
try ( InputStream stream = getClass().getResourceAsStream( NIFSTD_ONTOLOGY_FILE ) ) {
4848
if ( stream == null ) {
4949
throw new RuntimeException( String.format( "The NIF ontology was not found in classpath at %s.", NIFSTD_ONTOLOGY_FILE ) );
5050
}
51-
return loadModelFromStream( new GZIPInputStream( stream ), processImports, inferenceMode );
51+
return loadModelFromStream( new GZIPInputStream( stream ), processImports, languageLevel, inferenceMode );
5252
} catch ( IOException e ) {
5353
throw new RuntimeException( e );
5454
}

src/ubic/basecode/ontology/providers/OntologyService.java

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,50 @@ public interface OntologyService {
1616

1717
void setProcessImports( boolean processImports );
1818

19+
enum LanguageLevel {
20+
/**
21+
* The full OWL language.
22+
*/
23+
FULL,
24+
/**
25+
* OWL-DL
26+
*/
27+
DL,
28+
/**
29+
* OWL/Lite
30+
*/
31+
LITE
32+
}
33+
34+
LanguageLevel getLanguageLevel();
35+
36+
void setLanguageLevel( LanguageLevel languageLevel );
37+
1938
enum InferenceMode {
39+
/**
40+
* No inference is supported, only the axioms defined in the ontology are considered.
41+
*/
2042
NONE,
21-
TRANSITIVE
43+
/**
44+
* Only basic inference is supported for {@code subClassOf} and {@code subPropertyOf}.
45+
* <p>
46+
* This is the fastest inference mode.
47+
*/
48+
TRANSITIVE,
49+
/**
50+
* Very limited inference.
51+
*/
52+
MICRO,
53+
/**
54+
* Limited inference.
55+
*/
56+
MINI,
57+
/**
58+
* Complete inference.
59+
* <p>
60+
* This is the slowest inference mode.
61+
*/
62+
FULL
2263
}
2364

2465
/**

0 commit comments

Comments (0)