# Bigdata ant build properties.
#
# $Id: build.properties 8723 2014-11-18 16:35:18Z thompsonbry $
#
# The root of the checked out svn source. This assumes that you have checked
# out the trunk so that all modules were automatically checked out and are in
# direct subdirectories of the directory containing this properties file and
# the ant build.xml file.
bigdata.dir=.
# Where the generated files will be written.
# build.dir/classes [compiled classes and misc resources for classpath]
# build.dir/docs [generated documentation].
# build.dir/docs/api [generated javadoc].
# build.dir/lib [bundled libraries copied here for easier deployment]
# build.dir/src [source code copied here for releases]
build.dir=ant-build
##
# javac options
##
# debug=on|off
javac.debug=on
# debuglevel=lines,vars,source (or any combination thereof).
javac.debuglevel=lines,vars,source
javac.verbose=off
javac.source=1.7
javac.target=1.7
javac.encoding=Cp1252
# javacc is required to compile the SPARQL grammar.
javacc.home=/usr/java/javacc-5.0
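#
# Note: javacc.home may also be overridden on the ant command line without
# editing this file, e.g. (the install path here is hypothetical):
#
# ant -Djavacc.home=/opt/javacc-5.0 ...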
##
# Versions to use of various dependencies.
#
# Note: You must also update pom.xml when versions are changed.
#
# Note: The ICU version number for the downloaded jar is 4_8, which is
# how it is published by that project. However, the version number for
# the corresponding maven dependency is 4.8. I have renamed the jar to
# have consistent version numbers for ant and maven.
#
# Note: There is also a naming inconsistency for highscalelib. That
# project publishes "high-scale-lib-vXXXX.jar", but the maven dependency
# is named "high-scale-lib-XXXX.jar".
#
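# For example, the ICU renaming described above amounts to something like
# (the published file name is an assumption based on the 4_8 convention):
#
# mv icu4j-4_8.jar icu4j-4.8.jar
#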
icu.version=4.8
zookeeper.version=3.4.5
sesame.version=2.7.13
slf4j.version=1.6.1
jetty.version=9.2.3.v20140905
servlet.version=3.1.0
lucene.version=3.0.0
apache.commons_codec.version=1.4
apache.commons_fileupload.version=1.3.1
apache.commons_io.version=2.1
apache.commons_logging.version=1.1.1
apache.httpclient.version=4.1.3
apache.httpclient_cache.version=4.1.3
apache.httpcore.version=4.1.4
apache.httpmime.version=4.1.3
colt.version=1.2.0
highscalelib.version=1.1.2
log4j.version=1.2.17
fastutil.version=6.5.16
dsiutils.version=1.0.6-020610
lgplutils.version=1.0.7-270114
ganglia.version=1.0.4
gas.version=0.1.0
jackson.version=2.3.1
blueprints.version=2.5.0
jettison.version=1.3.3
rexster.version=2.5.0
junit.version=4.11
hamcrest.version=1.3
# Set to true to NOT start services (zookeeper, lookup server, class server, etc).
# When true, tests which depend on those services will not run. (This can also be
# set by CI if you leave it undefined here.) For example:
# export skipTestServices=true
# ant -DskipTestServices=${skipTestServices} ...
#skipTestServices=false
##
# Properties for creating a release.
##
# Where the releases will be written.
release.dir=ant-release
# The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0
build.ver=1.5.2-MAPGRAPH-SNAPSHOT
build.ver.osgi=1.0
# Set true to do a snapshot build. This changes the value of ${version} to
# include the date.
snapshot=true
# The javadoc build may be enabled by defining this property. The javadoc target
# will not be executed unless this property is defined (its value does not matter).
# Note: The javadoc goes quickly if you have enough memory, but can take forever
# and then run out of memory if the JVM is starved for RAM. The heap for the
# javadoc JVM is explicitly set in the javadoc target in the build.xml file.
#javadoc=
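#
# Since only the presence of the property matters, the javadoc build can also
# be enabled from the command line, e.g.:
#
# ant -Djavadoc=true javadoc
#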
# packaging property set (rpm, deb).
package.release=1
package.prefix=/usr
package.conf.dir=/etc/bigdata
package.fedname=BigdataFed
package.pid.dir=/var/run/bigdata
package.var.dir=/var/lib/bigdata
package.share.dir=/usr/share/bigdata
# The SCP program to use when uploading javadoc or releases.
#ssh.scp=C:/Program Files/PuTTY/pscp
ssh.scp=/usr/bin/scp
##
# Properties for running the standalone bigdata federation (clustered is below).
#
# See ant 'standalone-start' and related tasks.
##
standalone.fed=testFed
##
# Properties for installing bigdata. Many of these properties are used by the
# 'ant install' target and also wind up substituted into the 'bigdataenv' script,
# the main bigdata 'configuration' file, and the logger configuration files.
##
# The name of the bigdata federation instance.
FED=benchmark
# Bigdata-specific directory on a shared volume accessible by all hosts in the
# cluster.
#
# Note: You can create the appropriate permissions by creating the directory
# ahead of time and doing chown to set the user and group and then chmod to give
# the group read/write permissions.
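#
# For example, with FED=benchmark (the user and group names are placeholders):
#
# mkdir -p /nas/bigdata/benchmark
# chown bigdata:users /nas/bigdata/benchmark
# chmod g+rwx /nas/bigdata/benchmark
#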
NAS=/nas/bigdata/${FED}
# Bigdata-specific directory on a local volume. Each host in the cluster will
# place the persistent state for the bigdata services running on that host within
# this directory. The user which will execute bigdata MUST be able to read/write
# files on this path on each host. Therefore, if you are not installing as root
# this will need to be a file within the user's home directory or some directory
# which exists on each host and is writable by that user.
LAS=/data/bigdata/${FED}
#LAS=~/bigdata/${FED}
# The location of the installed JDK that will be used to build / run bigdata.
#
# Note: For Sun JVMs, 6u14 with compressed pointers is recommended for 64-bit
# machines having no more than 32G of RAM. Also, note that I have seen core
# dumps with Sun jdk1.6.0_07 and FC6 on a 32-bit platform.
#
#JAVA_HOME=C:\\Program Files\\Java\\jdk1.6.0_10
JAVA_HOME=/usr/java/jdk1.7.0_55
#JAVA_HOME=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64
#JAVA_HOME=/usr/java/jrockit-R27.3.0-jdk1.6.0_01
# The port on which the jini class server will be running. This class server
# is started on whichever host(s) are configured to run jini. It is part of
# the set of jini core services which includes reggie, etc. It is NOT set up
# to expose any classes except those found in the JARs of the jini/lib-dl
# directory.
JINI_CLASS_SERVER_PORT=8081
# The port on which the load balancer exposes an httpd service which makes
# available the aggregated performance counters, events, and index dumps.
LOAD_BALANCER_PORT=9999
# When true, all performance counters will be relayed to the load balancer.
# This significantly increases the memory burden on the load balancer, but
# it also provides much more detailed insight into what the federation is
# doing. You need to set this to true to take advantage of the Excel worksheets
# for performance monitoring.
REPORT_ALL=false
# Where the sysstat utilities are found (performance counter reporting for un*x).
SYSSTAT_HOME=/usr/local/bin
#SYSSTAT_HOME=/usr/bin
# Specifies the value of com.sun.jini.jeri.tcp.useNIO. When true, use NIO for RMI.
USE_NIO=true
# Where to install the scripts (must be readable by all hosts).
install.bin.dir=${NAS}/bin
# Where to install the documentation.
install.doc.dir=${NAS}/doc
# Where to install the JARs (must be readable by all hosts).
install.lib.dir=${NAS}/lib
# Where to install the configuration files (must be readable by all hosts).
install.config.dir=${NAS}/config
# Where to write the log files.
install.log.dir=${NAS}/log
# Where to write the deployment distribution.
install.dist.dir=${NAS}/dist
# The 'install-as' user (defaults to the user running the installer).
#
# @TODO ant lacks sufficient mechanisms to set the user (chown).
#
# @TODO should this be used to specify the user in the jini service config files?
install.user=${user.name}
# The group on all hosts that is able to read the scripts, write log files, etc.
# This can be 'users' when sharing across hosts while running bigdata under a
# normal user login. It can be 'wheel' when sharing across hosts using a root
# login.
#install.group=users
#install.group=wheel
install.group=${user.name}
# The file permissions mask used for files that must be read/write for all hosts.
#
# Note: umask uses the following numbers for permissions:
#
# 0 - read, write and execute
# 1 - read and write
# 2 - read and execute
# 3 - read only
# 4 - write and execute
# 5 - write only
# 6 - execute only
# 7 - no permissions
# 117 := user and group have read-write access, other has none.
# 177 := user has read-write access, group and other have none.
#
# @todo not used yet - perhaps will never be used.
#
#umask.shared=117
#umask.local=177
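#
# As a worked example: with umask 117, a newly created file (default mode 666)
# would get 666 & ~117 = 660 (rw-rw----), i.e. read/write for user and group
# and no access for others.
#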
# The command to obtain the lock for the bigdata subsystem lock file. Both
# lockfile (procmail) and dotlockfile (liblockfile1) will work. The "-1" for
# lockfile is the sleeptime (seconds). The -r 1 in both cases has the same
# semantics and is the #of times to retry. The commands have different wait /
# sleep defaults (8 seconds for lockfile versus 5 seconds for dotlockfile), but
# you cannot override this for dotlockfile. The bigdata script only tests for
# an exit status of ZERO (0) to indicate that the lock was obtained.
#
# lockfile is part of procmail
LOCK_CMD=lockfile -r 1 -1
#
# dotlockfile is in the liblockfile1 package.
#LOCK_CMD=/usr/bin/dotlockfile -r 1
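#
# To check the exit-status convention by hand (the lock file path here is
# illustrative):
#
# lockfile -r 1 -1 /tmp/bigdata.lock && echo "lock obtained"
#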
# The bigdata subsystem lock file. The user MUST be able to read/write this file
# on each host. Therefore, if you are not installing as root this will need to be
# a file within the user's home directory or some directory which exists on each
# host and is writable by that user. ${LAS}/lockFile is the recommended default.
#LOCK_FILE=/var/lock/subsys/bigdata
LOCK_FILE=${LAS}/lockFile
# The main bigdata configuration file.
bigdata.config=${install.config.dir}/bigdataStandalone.config
# The main jini configuration file.
jini.config=${install.config.dir}/jini/startAll.config
# The policy file used to start clients and services. The default policy
# file is completely open.
policyFile=${install.config.dir}/policy.all
# The host that will run the log4j SimpleSocketLogger and the port on which
# the logger will listen. This gets written into the bigdata configuration
# file and the log4j.properties file such that the logger daemon will startup
# on this host and the clients and services will log onto a socket appender
# which logs onto this host. log4j.properties (the file used by the clients
# and services) is setup to log INFO+ onto this service. It will also log
# ERROR+ onto the local console in case the socket logger is down. The socket
# logger is set up in log4jServer.properties. It logs ERROR+ onto the errorLog
# (see below), INFO+ onto the detailLog (see below), and events onto the
# eventLog (see below).
#
# Note: java.util.logging messages DO NOT get written onto this logger -- only
# log4j messages.
#
LOG4J_SOCKET_LOGGER_HOST = bigdata01.bigdata.com
LOG4J_SOCKET_LOGGER_PORT = 4445
# The socket logger uses a DailyRollingFileAppender by default and this
# specifies the DatePattern property which determines both when the file
# will be rolled over and the name of the rolled over log file.
#
# Note: You are responsible for pruning old log files!
#
# roll over at midnight.
LOG4J_DATE_PATTERN='.'yyyy-MM-dd'.log'
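#
# With this pattern, a base file such as detail.log is rolled at midnight to a
# dated copy named along the lines of detail.log.2014-11-18.log.
#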
# The log4j configuration file for the clients and services. This is used
# to set the log4j.configuration property.
#
# Note: This is a URL!!!
#
log4j.config=file:${install.config.dir}/log4j.properties
# The log4j configuration file for the SimpleSocketServer.
#
# Note: This is a FILE (not a URL)
#
log4jServer.config=${install.config.dir}/log4jServer.properties
# The java.util.logging configuration file. (Jini uses java.util.logging).
#
# Note: The java.util.logging system DOES NOT use the simple socket logger.
# You have to look at the console output or otherwise configure log message
# aggregation for java.util.logging separately.
#
logging.config=${install.config.dir}/logging.properties
# Bigdata messages at ERROR or above are logged on this file.
errorLog=${install.log.dir}/error.log
# Bigdata messages at INFO or above (or as configured) are logged on this file.
detailLog=${install.log.dir}/detail.log
# Bigdata events are logged on this file.
eventLog=${install.log.dir}/event.log
# Bigdata rule evaluation is logged on this file.
ruleLog=${install.log.dir}/rule.log
# Messages from the bigdata script are written here when it is run by cron. This
# file must be writable by all hosts.
#
# @todo are concurrent appends on this file getting lost?
stateLog=${install.log.dir}/state.log
# When cron or a similar process is used to periodically execute the 'bigdata'
# script, the script can be invoked with the name of this file and the value in
# the file will be interpreted as the goal state for the script. The value in
# the file is initially 'status'. It is changed to 'start' to bring up the
# bigdata federation. This file must be readable by all hosts. Writes may be
# restricted to a specific user.
stateFile=${NAS}/state
# Boolean option. When true, 'bigdata stop' and 'bigdata destroy' will use
# 'killall -9 java' to provide a sure kill for ALL java processes on the host.
# Needless to say, this option does not play well with other java components
# running on the same host (at least, running as the same user on the same
# host). This value is written into bigdataenv as an environment variable
# named "FORCE_KILL_ALL" so you can change the behavior after the install.
# See BLZG-1594 (Cluster: Setting 'stop' to state kills all java apps)
forceKillAll=false
#
# NTP setup (optional, not fully automated).
#
# These properties are substituted into the sample ntp-client.conf and ntp.conf
# files. Those files may be used as a guideline for setting up ntpd on your
# cluster, but please see the following resources.
#
# See http://www.cis.udel.edu/~mills/ntp/html/ntpdate.html
#
# See http://www.brennan.id.au/09-Network_Time_Protocol.html
#
# Once you have ntp configured, you can use 'bigdata ntpSet' to sync the machines
# to the ntp server and 'bigdata ntpStart' to restart the ntpd client on those
# machines once their clocks are synchronized (ntpd can not be running when you
# need to correct for a large clock error).
#
# Note: Most of bigdata does not rely on synchronized clocks. However, the
# performance counter reporting subsystem reports local timestamps. Therefore
# synchronized clocks can make it much easier to interpret the logged performance
# counters. The load balancer bases its decision on a subset of the performance
# counters, so a large clock skew can affect its decision making.
#
# The ntpd server on your local network. The value should be the value reported
# by 'hostname'. If you are not using ntpd or if you are managing it yourself,
# then you do not need to set this property.
NTP_MASTER=
# The local network address.
NTP_NETWORK=192.168.6.0
# The local network mask.
NTP_NETMASK=255.255.255.0
##
# Properties for the 'analysis' target.
##
# For convenience - used by other properties in this file.
#analysis.dir=E:/DPP/cluster16/U100000b/run21/opt2/run21c
analysis.dir=E:/DPP/dpaether123/async-write-runs-june-09/run5/nas/runs/run5
# Either the file or the directory containing the logged performance counters
# to be extracted for analysis.
analysis.counters.dir=${analysis.dir}/counters
# Either the file or the directory containing the queries to be executed. Each
# query is a URL formed just like the URLs used to query the live bigdata federation.
# The 'file=' URL query parameter should be added to write the output of the query
# on the named file.
analysis.queries=src/resources/analysis/queries
#analysis.queries=scratchQueries.txt
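#
# For example, a query URL with the output parameter appended might look like
# the following (host, port, and the rest of the query string are illustrative):
#
# http://bigdata01.bigdata.com:9999/?...&file=query01.out
#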
# The directory where the extracted performance counters will be written.
analysis.out.dir=${analysis.dir}/output
##
# Properties for the "install-lubm" target (optional).
##
# Basic install directory (scripts will go into [install.bin.dir]).
install.lubm.dir=${NAS}/lubm
# Where to install the JAR.
install.lubm.lib.dir=${install.lubm.dir}/lib
# Where to install the ontology and configuration files
install.lubm.config.dir=${install.lubm.dir}/config
# Note: be sure to choose a port that is not already in use by the
# load balancer, by the jini core services class server, etc. You
# MUST specify the same port in the java.rmi.server.codebase property
# (this is done automatically below).
LUBM_CLASS_SERVER_PORT = 8082
# The name of the host on which the class server is running. This must
# be the host on which you run the 'ant install' target since the class
# server is configured to serve up classes from the ant-build/classes
# directory.
LUBM_CLASS_SERVER_HOSTNAME = XXX
# The java.rmi.server.codebase for lubmMaster.sh. You can use wget to verify
# that the class server is working (once you start it using classServer.sh).
#
# wget -o /dev/null --no-cache ${LUBM_RMI_CODEBASE_URL}edu/lehigh/swat/bench/ubt/bigdata/LubmGeneratorMaster.class
#
LUBM_RMI_CODEBASE_URL = http://${LUBM_CLASS_SERVER_HOSTNAME}:${LUBM_CLASS_SERVER_PORT}/
# The LUBM configuration files and the ontology can be found on the installed system.
LUBM_ONTOLOGY_DIR=${NAS}/lubm
# properties needed to install bigdata behind the OpenRDF Sesame HTTP Server
#sesame.server.dir = C:/server/apache-tomcat-6.0.24/webapps/openrdf-sesame
#workbench.server.dir = C:/server/apache-tomcat-6.0.24/webapps/openrdf-workbench
#aduna.data.dir = C:/Documents and Settings/mike/Application Data/Aduna/OpenRDF Sesame console
#sesame.dir = C:/dev/openrdf-sesame-2.3.0
sesame.server.dir = D:/apache-tomcat-6.0.26/webapps/openrdf-sesame
workbench.server.dir = D:/apache-tomcat-6.0.26/webapps/openrdf-workbench
aduna.data.dir = C:/Documents and Settings/Bryan Thompson/Application Data/Aduna/OpenRDF Sesame console
sesame.dir = D:/openrdf-sesame-2.3.1
##
# Properties which affect the performance test runs.
#
# The top-level directory above the various data files to be used.
perf.data.dir=/usr/bigdata/data
# The directory in which the performance tests will be run. This directory needs to be
# on a volume with a lot of room. The directory may be destroyed (by the test harness)
# after the performance tests have run their course.
perf.run.dir=/usr/bigdata/runs
# CI properties. These must agree with the actual installation directory and zoo.cfg
# file for the zookeeper instance used to run CI.
#test.zookeeper.installDir=/opt/zookeeper-${zookeeper.version}
#test.zookeeper.installDir=/usr/zookeeper-current
local.test.zookeeper.installDir=/usr/zookeeper-current
#test.zookeeper.installDir=/usr/java/zookeeper-${zookeeper.version}
#test.zookeeper.installDir=/usr/local/zookeeper-${zookeeper.version}
test.zookeeper.tickTime=2000
test.zookeeper.clientPort=2081
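#
# The matching zoo.cfg entries would look something like the following (dataDir
# is a placeholder; it must agree with your actual installation):
#
# tickTime=2000
# clientPort=2081
# dataDir=/var/lib/zookeeper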