/* Copyright (c) 2003-2007 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef NDB_LIMITS_H
#define NDB_LIMITS_H
#include <mysql.h>
#define RNIL 0xffffff00
/**
* Note that the actual number of usable node IDs is MAX_NODES - 1,
* since NodeId 0 cannot be used
*/
#define MAX_NDB_NODES 49
#define MAX_NODES 64
#define UNDEF_NODEGROUP 0xFFFF
/**************************************************************************
* MAX_DATA_NODE_ID SHOULD BE (MAX_NDB_NODES - 1).
* WHEN MAX_NDB_NODES IS CHANGED, THIS MUST BE CHANGED AS WELL.
**************************************************************************/
#define MAX_DATA_NODE_ID 48
/**************************************************************************
* MAX_NODES_ID SHOULD BE (MAX_NODES - 1).
* WHEN MAX_NODES IS CHANGED, THIS MUST BE CHANGED AS WELL.
**************************************************************************/
#define MAX_NODES_ID 63
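/*
* Illustrative compile-time checks for the two relations above. This is a
* sketch, not part of the original header; the typedef names are
* hypothetical. The negative-array-size trick is used so that no
* C11/C++11 static_assert support is required.
*/
typedef char ndb_assert_max_data_node_id[(MAX_DATA_NODE_ID == MAX_NDB_NODES - 1) ? 1 : -1];
typedef char ndb_assert_max_nodes_id[(MAX_NODES_ID == MAX_NODES - 1) ? 1 : -1];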
/**
* MAX_API_NODES = MAX_NODES - the number of NDB data nodes in use
*/
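/*
* Illustrative sketch of the relation above. Not part of the original
* header; the macro name is hypothetical. For example, a cluster using
* 4 NDB data nodes leaves 64 - 4 = 60 node IDs for API nodes.
*/
#define NDB_MAX_API_NODES_FOR(ndb_nodes_in_use) (MAX_NODES - (ndb_nodes_in_use))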
/**
* The maximum number of replicas in the system
*/
#define MAX_REPLICAS 4
/**
* The maximum number of local checkpoints stored at a time
*/
#define MAX_LCP_STORED 3
/**
* The maximum number of log execution rounds at system restart
*/
#define MAX_LOG_EXEC 4
/**
* The maximum number of tuples per page
*/
#define MAX_TUPLES_PER_PAGE 8191
#define MAX_TUPLES_BITS 13 /* 2^13 - 1 = 8191 tuples per page */
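/*
* Illustrative check tying the two constants above together. A sketch,
* not part of the original header; the typedef name is hypothetical.
*/
typedef char ndb_assert_tuples_bits[(MAX_TUPLES_PER_PAGE == (1 << MAX_TUPLES_BITS) - 1) ? 1 : -1];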
#define MAX_TABLES 20320 /* SchemaFile.hpp */
#define MAX_TAB_NAME_SIZE 128
#define MAX_ATTR_NAME_SIZE NAME_LEN /* From mysql_com.h */
#define MAX_ATTR_DEFAULT_VALUE_SIZE 128
#define MAX_ATTRIBUTES_IN_TABLE 128
#define MAX_ATTRIBUTES_IN_INDEX 32
#define MAX_TUPLE_SIZE_IN_WORDS 2013
#define MAX_KEY_SIZE_IN_WORDS 1023
#define MAX_FRM_DATA_SIZE 6000
#define MAX_NULL_BITS 4096
#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
#define MAX_NDB_PARTITIONS 1024
#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) // 0.5 MByte of list data
#define MAX_WORDS_META_FILE 24576
#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*
* The maximum number of records to fetch per SCAN_NEXTREQ in an LQH scan.
* The API can request a multiple of this number of records at a time,
* since fragments can be scanned in parallel.
*/
#define MAX_PARALLEL_OP_PER_SCAN 992
/*
* The default batch size. Configurable parameter.
*/
#define DEF_BATCH_SIZE 64
/*
* The number of records sent from LQH in each batch is calculated as
* SCAN_BATCH_SIZE divided by the expected signal size per row. This gives
* the batch size used for the scan. The NDB API receives one batch from
* each node at a time, so some care is also needed to ensure the NDB API
* is not overloaded with signals.
* This parameter is configurable; this is the default value (see the
* sketch after MAX_SCAN_BATCH_SIZE below).
*/
#define SCAN_BATCH_SIZE 32768
/*
* To protect the NDB API from overload we also define a maximum total
* batch size from all nodes. Ideally this limit would depend on
* sendBufferSize.
* This parameter is configurable; this is the default value.
*/
#define MAX_SCAN_BATCH_SIZE 262144
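/*
* Illustrative sketch of the batch size calculation described above. Not
* part of the original header: the function name and the 200-byte-per-row
* estimate are hypothetical, and static inline assumes a C99 or C++
* compiler. With SCAN_BATCH_SIZE = 32768 and roughly 200 bytes of signal
* data per row, one LQH batch carries
* min(MAX_PARALLEL_OP_PER_SCAN, 32768 / 200) = min(992, 163) = 163 rows,
* while the total outstanding data from all nodes is capped at
* MAX_SCAN_BATCH_SIZE = 262144 bytes.
*/
static inline unsigned
ndb_rows_per_lqh_batch(unsigned expected_bytes_per_row)
{
  /* Rows that fit in one batch, capped by the per-scan operation limit. */
  unsigned rows = SCAN_BATCH_SIZE / expected_bytes_per_row;
  return (rows < MAX_PARALLEL_OP_PER_SCAN) ? rows : MAX_PARALLEL_OP_PER_SCAN;
}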
/*
* Maximum number of Parallel Scan queries on one hash index fragment
*/
#define MAX_PARALLEL_SCANS_PER_FRAG 12
/*
* Maximum parallel ordered index scans per primary table fragment.
* The implementation limit is (256 - 12) = 244.
*/
#define MAX_PARALLEL_INDEX_SCANS_PER_FRAG 32
/**
* Computed defines
*/
#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32)
/*
* Ordered index constants. Make configurable per index later.
*/
#define MAX_TTREE_NODE_SIZE 64 /* total words in node */
#define MAX_TTREE_PREF_SIZE 4 /* words in min prefix */
#define MAX_TTREE_NODE_SLACK 2 /* diff between max and min occupancy */
/*
* Blobs.
*/
#define NDB_BLOB_HEAD_SIZE 2 /* sizeof(NdbBlob::Head) >> 2 */
/*
* Character sets.
*/
#define MAX_XFRM_MULTIPLY 8 /* max expansion when normalizing */
/**
* Disk data
*/
#define MAX_FILES_PER_FILEGROUP 1024
/**
* Page size in global page pool
*/
#define GLOBAL_PAGE_SIZE 32768
#define GLOBAL_PAGE_SIZE_WORDS 8192
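/*
* Illustrative check relating the two constants above (words are 32-bit,
* so the word count is the byte count divided by 4). A sketch, not part
* of the original header; the typedef name is hypothetical.
*/
typedef char ndb_assert_global_page_words[(GLOBAL_PAGE_SIZE_WORDS * 4 == GLOBAL_PAGE_SIZE) ? 1 : -1];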
/*
* Long signals
*/
#define NDB_SECTION_SEGMENT_SZ 60
/*
* Restore Buffer in pages
* 4M
*/
#define LCP_RESTORE_BUFFER (4*32)
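/*
* Illustrative check of the "4M" comment above, assuming the restore
* buffer is counted in 32 KB global pages: 4*32 = 128 pages of 32768
* bytes = 4 MB. A sketch, not part of the original header; the typedef
* name is hypothetical.
*/
typedef char ndb_assert_lcp_restore_buffer[(LCP_RESTORE_BUFFER * GLOBAL_PAGE_SIZE == 4 * 1024 * 1024) ? 1 : -1];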
#endif