//******* BangDB engine related config *******
//IMPORTANT: the most interesting and contextual (local to each app) params are marked with *** (3 stars) at the
// beginning of the comments. Slightly less interesting ones are marked with ** (2 stars) and still less interesting
// ones with * (single star). For params that are not marked with any star, please be doubly sure before changing them
//***the dir where the db files will be created. Please edit it with a suitable dir location
//default is the local dir; note this can also be provided as an input param while creating a database
SERVER_DIR = /root/store/
//***the name of the db (applicable only for the server; other clients (including embedded db) should define the name while calling the API)
//for the embedded db case, the name is provided as an input param while creating/opening a new database (hyphen '-' is not allowed in the name)
BANGDB_DATABASE_NAME = bangdbData
//***the name of the table (applicable only for the server; other clients (including embedded db) should define the name while calling the API)
//for the embedded db case, the name is provided as an input param while creating/opening a table (hyphen '-' is not allowed in the name)
BANGDB_TABLE_NAME = bangdbTableDdata
//bangdb group id
BANGDB_GROUP = bangdb
//*page size in bytes. This is constant for the db and applies to all tables. It can't be changed once set
//for a large key_size, please set the page size larger. Basically we should try to have at least 32 keys in a page,
//but the more keys we can accommodate in a page, the more efficient the computation will be
//the default size of 8192 works pretty well for keys up to 300 bytes in length. Try to design your keys to be
//as small as possible, ideally below 32 bytes
PAGE_SIZE = 8192
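//a minimal illustration (the value below is an assumption, not a recommended default): if your keys are much
//longer than the ~300 byte range mentioned above, a larger page such as the commented-out alternative keeps
//roughly 32 or more keys per page
//PAGE_SIZE = 16384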
//**type of persistence (0=INMEM_ONLY, 1=INMEM_PERSIST, 2=PERSIST_ONLY)
//applies to a table, hence within a db, different tables can have different persist types
//0 (INMEM_ONLY) means data lives only in memory and is not backed by any file on disk. If the process terminates and no data dump
//has been taken, the data is lost. Option 1 (INMEM_PERSIST) means data in memory is backed by a file on disk, hence the db can
//handle much more data than the allocated memory, which is not possible with option 0 (INMEM_ONLY) where the db is limited to an
//amount of data equal to the size of the allocated memory. Option 2 (PERSIST_ONLY) is the other extreme, where all operations are
//done with direct I/O on the file on disk. Hence this is the most conservative option, with very high data durability but low performance.
//If you want to handle a limited amount of data with the highest performance, use option 0 (and take a data dump whenever you want
//to save the data, or just discard it when done). If you want to handle a very large amount of data, then instead of adding more
//memory (RAM) to the machine you can go with option 1, where the db utilizes the available memory and handles a lot more data than
//the size of memory. It does this intelligently to provide high throughput and low latency. You can also switch on the write-ahead
//log and keep flushing it at regular intervals. Finally, when data durability is of the highest importance and the volume of data
//to be handled is low (in the range of 500MB or so), option 2 (PERSIST_ONLY) is a good choice
BANGDB_PERSIST_TYPE = 1
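//illustrative commented-out alternatives matching the workloads described above (enable at most one):
//BANGDB_PERSIST_TYPE = 0    //limited data, highest performance; dump data explicitly if it must survive restarts
//BANGDB_PERSIST_TYPE = 2    //small volume (~500MB range) where durability matters most; lowest performance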
//**type of index (1 = EXTHASH, 2 = BTREE)
//Applicable for a table hence within a db, different tables can have different index types
BANGDB_INDEX_TYPE = 2
//**Important when BangDB is run in transaction mode. If auto commit is off (0) then an explicit transaction
//is required (begin, commit/abort), else implicit non-transactional single ops can be run in the usual manner
//this can later be set/unset whenever required using the API exposed by the connection
BANGDB_AUTOCOMMIT = 1
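//illustrative alternative: with the commented-out setting below, ops must be wrapped in an explicit
//transaction (begin, then commit/abort) through the connection API, as described above
//BANGDB_AUTOCOMMIT = 0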
//*transaction cache size in terms of the number of concurrent transactions. Increasing this decreases the probability
//of a transaction getting aborted due to forced reclaim of cache nodes. The default works well in most situations
BANGDB_TRANSACTION_CACHE_SIZE = 512
//***write ahead log enabled = 1, disabled = 0
//This is applicable for a table, hence within a db, different tables can have different settings
BANGDB_LOG = 1
//**data buffer or value size in KB (max)
//This is applicable for the db, hence applies to all tables
DAT_SIZE = 64
//**key size in bytes (max), min allowed size is 8 bytes
//Please keep this as low as possible for better db performance and efficiency.
//This applies to a table, hence within a db, different tables can have different key sizes. But once set
//it can't be changed later
KEY_SIZE = 24
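//rough worked example (ignores per-page and per-entry overhead, so treat it as an approximation only):
//with PAGE_SIZE = 8192 and KEY_SIZE = 24, a page holds on the order of 8192 / 24 ≈ 340 keys,
//comfortably above the 32-keys-per-page guideline noted for PAGE_SIZE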
//*the default max size of the resultset returned by a range scan (in MB)
//the range scan query can limit the amount of data to be returned using this value
MAX_RESULTSET_SIZE = 2
//max number of tables a db can have
MAXTABLE = 64
//*max number of connections a client can have; note: all will share the buffer pool
//This should not be confused with the maximum number of concurrent client connections to the server,
//for that see MAX_CLIENT_EVENTS
MAXCONN = 128
//*mainly for bangdb embedded, not relevant for the server
//max threads used by the client (the application or client should not have more concurrent threads than this number)
//need not be too accurate, but it helps in optimizing housekeeping
MAX_THREADS = 128
//**maximum number of concurrent connections to the server, i.e. the number of concurrent connections the server can handle
//default is 10000, but change it to a lower number as suitable. Please don't go beyond 10000 as of now
//not relevant for bangdb embedded
MAX_CLIENT_EVENTS = 10000
//***buf pool size (in MB)
BUFF_POOL_SIZE_HINT = 1024
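//rough sizing sketch (an approximation that ignores pool metadata): 1024 MB with 8192-byte pages gives
//about (1024 * 1024 * 1024) / 8192 = 131072 buffered pages; scale this hint with the RAM you can spare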
//**log buffer size (in MB)
LOG_BUF_SIZE = 64
//*master log size (in KB); keep it low, typically 64KB is more than enough even for 100s of millions of records
MASTER_LOG_BUF_SIZE = 64
//**the sync for db data on close; 0 means no sync, else sync
BANGDB_SYNC = 0
//**the frequency for log flush in micro sec
LOG_FLUSH_FREQ = 50000
//***to enable(1)/disable(0) check pointing
CHKPNT_ENABLED = 0
//*check point frequency in micro sec
CHKPNT_FREQ = 3370000
//*log split check frequency in micro sec
LOG_SPLIT_CHECK_FREQ = 23000
//*the buffer cache dirty page flusher and the buffer cache memory reclaimer frequency in micro sec
//note that this is just a hint and db changes this as per need
BUF_FLUSH_RECLAIM_FREQ = 60000
//in case of heavy memory pressure, grow the buffer by this amount in MB
GROW_BUFF_SIZE = 16
//*max pages to look at for scatter gather; put 0 to select the system-supported number (suggested), else put any number, but if it's more than the system supports
//it will be corrected to the system-supported one
SCATTER_GATHER_MAX = 0
//please be careful before changing the parameters below as they may affect the db's performance
//max headers to scan to look for dirty pages
MIN_DIRTY_SCAN = 128
//max headers to scan to find an updated page
MIN_UPDATED_SCAN = 32
//this defines the constraints for flushing the index pages
IDX_FLUSH_CONSTRAINT = 4
//this defines the constraints for flushing the data pages
DAT_FLUSH_CONSTRAINT = 25
//this defines the constraints for freeing up the index pages for memory
IDX_RECLAIM_CONSTRAINT = 3
//this defines the constraints for freeing up the dat pages for memory
DAT_RECLAIM_CONSTRAINT = 7
//this indicates the speed at which the data is written
PAGE_WRITE_FACTOR = 128
//this indicates the speed at which data is read
PAGE_READ_FACTOR = 128
//this normalizes idx vs dat pages, helpful when we favor one over the other
IDX_DAT_NORMALIZE = 2
//the pre-fetch buffer max size (in MB); it can be lower than this but not greater
PREFETCH_BUF_SIZE = 32
//pre-fetch scan window size
PREFETCH_SCAN_WINDOW_NUM = 24
//pre-fetch extent size
PREFETCH_EXTENT_NUM = 16
//**selects the key comparison function. Note that once selected this can never be changed for a db.
//applicable only for the Btree type. The default value 0 is lexicographical order for keys and value 1 is quasi-lexicographical order
//for example, for keys {12, 1, 2}, with value 0 the order will be {1, 12, 2} and with 1 it will be {1, 2, 12}
KEY_COMP_FUNCTION_ID = 1
//******* server configuration (applicable for the master-slave model network db) *******
//***type of server: master (0) or slave (1) (only for servers, not for the client)
SERVER_TYPE = 0
//**enable or disable replication
ENABLE_REPLICATION = 1
//***the ip address or name of the server
SERVER_ID = 127.0.0.1
//***port number where db service is running
SERV_PORT = 7888
//***in case of a slave, set the following two appropriately; note that for the master node these two will be the same as SERVER_ID and SERV_PORT (see the illustrative slave sketch below)
//the ip address or name of the master server
MASTER_SERVER_ID = 127.0.0.1
//***master's port num
MASTER_SERV_PORT = 7888
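//a minimal illustrative slave setup (host/port values below are placeholders, not defaults): the slave declares
//itself with SERVER_TYPE = 1 and its own SERVER_ID/SERV_PORT, and points MASTER_SERVER_ID/MASTER_SERV_PORT
//at the master shown above
//SERVER_TYPE = 1
//SERVER_ID = 192.168.1.12
//SERV_PORT = 7889
//MASTER_SERVER_ID = 127.0.0.1
//MASTER_SERV_PORT = 7888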
//**2nd argument to listen(), the queue size for listen
LISTENQ = 10000
//***max slaves allowed in the master slave topology
MAX_SLAVES = 4
//**the ops record buffer size in MB (useful in replication). For 10M ops, 500 MB should be good enough
OPS_REC_BUF_SIZE = 256
//***the ping frequency (default = 10s)
PING_FREQ = 10
//***the threshold for ping failure (num of times, default is 5)
PING_THRESHOLD = 5
//***the timeout value for client sockets; 0 means no timeout and a positive value means timeout in seconds
CLIENT_TIME_OUT = 120
//*stage option; basically it tells the server how many stages to create to handle the clients and their requests
//two options are supported as of now
//1. two stages, one for handling clients and the other for handling the requests
//2. four stages, one for handling clients, one for read, one for ops and finally one for write
//default is option 1
SERVER_STAGE_OPTION = 1
//*how many workers for the ops stage. Note that for option 1, the workers handle all the last three event types (read, write, ops)
//the suggested number of workers for option 1 is the number of processors on the machine, and that's the default, but you may change it
//for option 2, you should make it NPROC-2 (suggested for NPROC>2, else 1; this is the default for option 2 too), but you can choose any number
//value 0 means default, else the exact num of workers
SERVER_OPS_WORKERS = 0
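//illustrative combination (the numbers are an example, not a recommendation): on an 8-processor machine
//using the four-stage layout, the NPROC-2 suggestion above works out to 6 workers
//SERVER_STAGE_OPTION = 2
//SERVER_OPS_WORKERS = 6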