author:    Nick Vatamaniuc <vatamane@gmail.com>  2021-11-20 01:00:08 -0500
committer: Nick Vatamaniuc <nickva@users.noreply.github.com>  2021-11-22 17:31:31 -0500
commit:    b78ccf18cb4ed6e183ed0bf4e5cbe40d7a7dc493 (patch)
tree:      82158f0b6c7e97e6955bf0c558aac6eb0329b410 /src/couch
parent:    6e87e43fae23647b281ede250ad9f1a68a8f1cde (diff)
download:  couchdb-b78ccf18cb4ed6e183ed0bf4e5cbe40d7a7dc493.tar.gz
Apply erlfmt formatting to source tree
These exceptions from the `main` branch were ported over to 3.x:
```
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -491,6 +491,7 @@ extract_cookie(#httpd{mochi_req = MochiReq}) ->
end.
%%% end hack
+%% erlfmt-ignore
set_auth_handlers() ->
AuthenticationDefault = "{chttpd_auth, cookie_authentication_handler},
```
```
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -49,6 +49,7 @@ help() ->
].
-spec help(Function :: atom()) -> ok.
+%% erlfmt-ignore
help(opened_files) ->
```
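The `%% erlfmt-ignore` comment used in both hunks exempts the form that immediately follows it from reformatting, which is how hand-aligned layouts (the default handler string in `chttpd.erl`, the help text in `couch_debug.erl`) survive the tree-wide pass. A minimal sketch of the pattern, using a hypothetical module:

```erlang
-module(fmt_example).
-export([statuses/0]).

%% The hand-aligned list below keeps its manual layout even when the rest
%% of the module is run through erlfmt, because the ignore comment exempts
%% the next form.
%% erlfmt-ignore
statuses() ->
    [
        {ok,        200},
        {created,   201},
        {not_found, 404}
    ].
```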
Diffstat (limited to 'src/couch')
111 files changed, 14154 insertions, 11283 deletions
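Most of the insertions below are mechanical: spaces added around operators (`[App|Rest]` becomes `[App | Rest]`, `1024*1024*1024` becomes `1024 * 1024 * 1024`), double blank lines collapsed to single ones, and calls that take fun arguments (`lists:map/2`, `lists:foldl/3`) broken so each argument sits on its own line. A before/after sketch of that last rewrite, with hypothetical names:

```erlang
-module(fmt_sketch).
-export([total/1]).

%% Before erlfmt, this was typically written with the fun opened inline:
%%
%%     total(Counts) ->
%%         lists:foldl(fun({_Key, N}, Acc) ->
%%             Acc + N
%%         end, 0, Counts).
%%
%% erlfmt puts each argument of the multi-line call on its own line and
%% dedents the closing parenthesis to the call's indentation:
total(Counts) ->
    lists:foldl(
        fun({_Key, N}, Acc) ->
            Acc + N
        end,
        0,
        Counts
    ).
```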
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl index 1c912ac2a..6952c16c8 100644 --- a/src/couch/src/couch.erl +++ b/src/couch/src/couch.erl @@ -18,7 +18,6 @@ restart/0 ]). - deps() -> [ sasl, @@ -32,7 +31,6 @@ deps() -> couch_log ]. - start() -> catch erlang:system_flag(scheduler_bind_type, default_bind), case start_apps(deps()) of @@ -42,26 +40,23 @@ start() -> throw(Else) end. - stop() -> application:stop(couch). - restart() -> init:restart(). - start_apps([]) -> ok; -start_apps([App|Rest]) -> +start_apps([App | Rest]) -> case application:start(App) of - ok -> - start_apps(Rest); - {error, {already_started, App}} -> - start_apps(Rest); - {error, _Reason} when App =:= public_key -> - % ignore on R12B5 - start_apps(Rest); - {error, _Reason} -> - {error, {app_would_not_start, App}} + ok -> + start_apps(Rest); + {error, {already_started, App}} -> + start_apps(Rest); + {error, _Reason} when App =:= public_key -> + % ignore on R12B5 + start_apps(Rest); + {error, _Reason} -> + {error, {app_would_not_start, App}} end. diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl index 12ac4874c..b3b2f23eb 100644 --- a/src/couch/src/couch_att.erl +++ b/src/couch/src/couch_att.erl @@ -60,7 +60,6 @@ -include_lib("couch/include/couch_db.hrl"). - %% Legacy attachment record. This is going to be phased out by the new proplist %% based structure. It's needed for now to allow code to perform lazy upgrades %% while the patch is rolled out to the cluster. Attachments passed as records @@ -79,8 +78,13 @@ md5 = <<>> :: binary(), revpos = 0 :: non_neg_integer(), - data :: stub | follows | binary() | {any(), any()} | - {follows, pid(), reference()} | fun(() -> binary()), + data :: + stub + | follows + | binary() + | {any(), any()} + | {follows, pid(), reference()} + | fun(() -> binary()), %% Encoding of the attachment %% currently supported values are: @@ -90,7 +94,6 @@ encoding = identity :: identity | gzip }). - %% Extensible Attachment Type %% %% The following types describe the known properties for attachment fields @@ -99,57 +102,57 @@ %% be used by upgraded code. If you plan on operating on new data, please add %% an entry here as documentation. - %% The name of the attachment is also used as the mime-part name for file %% downloads. These must be unique per document. -type name_prop() :: {name, binary()}. - %% The mime type of the attachment. This does affect compression of certain %% attachments if the type is found to be configured as a compressable type. %% This is commonly reserved for text/* types but could include other custom %% cases as well. See definition and use of couch_util:compressable_att_type/1. -type type_prop() :: {type, binary()}. - %% The attachment length is similar to disk-length but ignores additional %% encoding that may have occurred. -type att_len_prop() :: {att_len, non_neg_integer()}. - %% The size of the attachment as stored in a disk stream. -type disk_len_prop() :: {disk_len, non_neg_integer()}. - %% This is a digest of the original attachment data as uploaded by the client. %% it's useful for checking validity of contents against other attachment data %% as well as quick digest computation of the enclosing document. -type md5_prop() :: {md5, binary()}. - -type revpos_prop() :: {revpos, 0}. - %% This field is currently overloaded with just about everything. The %% {any(), any()} type is just there until I have time to check the actual %% values expected. Over time this should be split into more than one property %% to allow simpler handling. 
-type data_prop() :: { - data, stub | follows | binary() | {any(), any()} | - {follows, pid(), reference()} | fun(() -> binary()) + data, + stub + | follows + | binary() + | {any(), any()} + | {follows, pid(), reference()} + | fun(() -> binary()) }. - %% We will occasionally compress our data. See type_prop() for more information %% on when this happens. -type encoding_prop() :: {encoding, identity | gzip}. - -type attachment() :: [ - name_prop() | type_prop() | - att_len_prop() | disk_len_prop() | - md5_prop() | revpos_prop() | - data_prop() | encoding_prop() + name_prop() + | type_prop() + | att_len_prop() + | disk_len_prop() + | md5_prop() + | revpos_prop() + | data_prop() + | encoding_prop() ]. -type disk_att_v1() :: { @@ -178,7 +181,7 @@ -type att() :: #att{} | attachment() | disk_att(). --define(GB, (1024*1024*1024)). +-define(GB, (1024 * 1024 * 1024)). new() -> %% We construct a record by default for compatability. This will be @@ -189,14 +192,13 @@ new() -> %% undefined. #att{}. - -spec new([{atom(), any()}]) -> att(). new(Props) -> store(Props, new()). - --spec fetch([atom()], att()) -> [any()]; - (atom(), att()) -> any(). +-spec fetch + ([atom()], att()) -> [any()]; + (atom(), att()) -> any(). fetch(Fields, Att) when is_list(Fields) -> [fetch(Field, Att) || Field <- Fields]; fetch(Field, Att) when is_list(Att) -> @@ -223,13 +225,15 @@ fetch(encoding, #att{encoding = Encoding}) -> fetch(_, _) -> undefined. - -spec store([{atom(), any()}], att()) -> att(). store(Props, Att0) -> - lists:foldl(fun({Field, Value}, Att) -> - store(Field, Value, Att) - end, Att0, Props). - + lists:foldl( + fun({Field, Value}, Att) -> + store(Field, Value, Att) + end, + Att0, + Props + ). -spec store(atom(), any(), att()) -> att(). store(Field, undefined, Att) when is_list(Att) -> @@ -255,17 +259,14 @@ store(encoding, Encoding, Att) -> store(Field, Value, Att) -> store(Field, Value, upgrade(Att)). - -spec transform(atom(), fun(), att()) -> att(). transform(Field, Fun, Att) -> NewValue = Fun(fetch(Field, Att)), store(Field, NewValue, Att). - is_stub(Att) -> stub == fetch(data, Att). - %% merge_stubs takes all stub attachments and replaces them with on disk %% attachments. It will return {missing, Name} if a stub isn't matched with %% an existing attachment on disk. If the revpos is supplied with the stub @@ -276,7 +277,6 @@ merge_stubs(MemAtts, DiskAtts) -> ), merge_stubs(MemAtts, OnDisk, []). - %% restore spec when R14 support is dropped %% -spec merge_stubs([att()], dict:dict(), [att()]) -> [att()]. merge_stubs([Att | Rest], OnDisk, Merged) -> @@ -305,23 +305,24 @@ merge_stubs([Att | Rest], OnDisk, Merged) -> merge_stubs([], _, Merged) -> {ok, lists:reverse(Merged)}. - size_info([]) -> {ok, []}; size_info(Atts) -> - Info = lists:map(fun(Att) -> - AttLen = fetch(att_len, Att), - case fetch(data, Att) of - {stream, StreamEngine} -> - {ok, SPos} = couch_stream:to_disk_term(StreamEngine), - {SPos, AttLen}; - {_, SPos} -> - {SPos, AttLen} - end - end, Atts), + Info = lists:map( + fun(Att) -> + AttLen = fetch(att_len, Att), + case fetch(data, Att) of + {stream, StreamEngine} -> + {ok, SPos} = couch_stream:to_disk_term(StreamEngine), + {SPos, AttLen}; + {_, SPos} -> + {SPos, AttLen} + end + end, + Atts + ), {ok, lists:usort(Info)}. - %% When converting an attachment to disk term format, attempt to stay with the %% old format when possible. 
This should help make the attachment lazy upgrade %% as safe as possible, avoiding the need for complicated disk versioning @@ -364,7 +365,6 @@ to_disk_term(Att) -> ), {list_to_tuple(lists:reverse(Base)), Extended}. - %% The new disk term format is a simple wrapper around the legacy format. Base %% properties will remain in a tuple while the new fields and possibly data from %% future extensions will be stored in a list of atom/value pairs. While this is @@ -372,45 +372,45 @@ to_disk_term(Att) -> %% compression to remove these sorts of common bits (block level compression %% with something like a shared dictionary that is checkpointed every now and %% then). -from_disk_term(StreamSrc, {Base, Extended}) - when is_tuple(Base), is_list(Extended) -> +from_disk_term(StreamSrc, {Base, Extended}) when + is_tuple(Base), is_list(Extended) +-> store(Extended, from_disk_term(StreamSrc, Base)); -from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) -> +from_disk_term(StreamSrc, {Name, Type, Sp, AttLen, DiskLen, RevPos, Md5, Enc}) -> {ok, Stream} = open_stream(StreamSrc, Sp), #att{ - name=Name, - type=Type, - att_len=AttLen, - disk_len=DiskLen, - md5=Md5, - revpos=RevPos, - data={stream, Stream}, - encoding=upgrade_encoding(Enc) + name = Name, + type = Type, + att_len = AttLen, + disk_len = DiskLen, + md5 = Md5, + revpos = RevPos, + data = {stream, Stream}, + encoding = upgrade_encoding(Enc) }; -from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,RevPos,Md5}) -> +from_disk_term(StreamSrc, {Name, Type, Sp, AttLen, RevPos, Md5}) -> {ok, Stream} = open_stream(StreamSrc, Sp), #att{ - name=Name, - type=Type, - att_len=AttLen, - disk_len=AttLen, - md5=Md5, - revpos=RevPos, - data={stream, Stream} + name = Name, + type = Type, + att_len = AttLen, + disk_len = AttLen, + md5 = Md5, + revpos = RevPos, + data = {stream, Stream} }; -from_disk_term(StreamSrc, {Name,{Type,Sp,AttLen}}) -> +from_disk_term(StreamSrc, {Name, {Type, Sp, AttLen}}) -> {ok, Stream} = open_stream(StreamSrc, Sp), #att{ - name=Name, - type=Type, - att_len=AttLen, - disk_len=AttLen, - md5= <<>>, - revpos=0, - data={stream, Stream} + name = Name, + type = Type, + att_len = AttLen, + disk_len = AttLen, + md5 = <<>>, + revpos = 0, + data = {stream, Stream} }. - %% from_json reads in embedded JSON attachments and creates usable attachment %% values. The attachment may be a stub, from_json(Name, Props) -> @@ -426,7 +426,6 @@ from_json(Name, Props) -> true -> inline_from_json(Att, Props) end. - stub_from_json(Att, Props) -> {DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props), Digest = digest_from_json(Props), @@ -434,21 +433,33 @@ stub_from_json(Att, Props) -> %% the revpos consistency check on stubs when it's not provided in the %% json object. See merge_stubs/3 for the stub check. RevPos = couch_util:get_value(<<"revpos">>, Props), - store([ - {md5, Digest}, {revpos, RevPos}, {data, stub}, {disk_len, DiskLen}, - {att_len, EncodedLen}, {encoding, Encoding} - ], Att). - + store( + [ + {md5, Digest}, + {revpos, RevPos}, + {data, stub}, + {disk_len, DiskLen}, + {att_len, EncodedLen}, + {encoding, Encoding} + ], + Att + ). follow_from_json(Att, Props) -> {DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props), Digest = digest_from_json(Props), RevPos = couch_util:get_value(<<"revpos">>, Props, 0), - store([ - {md5, Digest}, {revpos, RevPos}, {data, follows}, {disk_len, DiskLen}, - {att_len, EncodedLen}, {encoding, Encoding} - ], Att). 
- + store( + [ + {md5, Digest}, + {revpos, RevPos}, + {data, follows}, + {disk_len, DiskLen}, + {att_len, EncodedLen}, + {encoding, Encoding} + ], + Att + ). inline_from_json(Att, Props) -> B64Data = couch_util:get_value(<<"data">>, Props), @@ -456,19 +467,22 @@ inline_from_json(Att, Props) -> Data -> Length = size(Data), RevPos = couch_util:get_value(<<"revpos">>, Props, 0), - store([ - {data, Data}, {revpos, RevPos}, {disk_len, Length}, - {att_len, Length} - ], Att) + store( + [ + {data, Data}, + {revpos, RevPos}, + {disk_len, Length}, + {att_len, Length} + ], + Att + ) catch _:_ -> Name = fetch(name, Att), - ErrMsg = <<"Invalid attachment data for ", Name/binary>>, + ErrMsg = <<"Invalid attachment data for ", Name/binary>>, throw({bad_request, ErrMsg}) end. - - encoded_lengths_from_json(Props) -> Len = couch_util:get_value(<<"length">>, Props), case couch_util:get_value(<<"encoding">>, Props) of @@ -481,14 +495,12 @@ encoded_lengths_from_json(Props) -> end, {Len, EncodedLen, Encoding}. - digest_from_json(Props) -> case couch_util:get_value(<<"digest">>, Props) of <<"md5-", EncodedMd5/binary>> -> base64:decode(EncodedMd5); _ -> <<>> end. - to_json(Att, OutputData, DataToFollow, ShowEncoding) -> [Name, Data, DiskLen, AttLen, Enc, Type, RevPos, Md5] = fetch( [name, data, disk_len, att_len, encoding, type, revpos, md5], Att @@ -497,42 +509,45 @@ to_json(Att, OutputData, DataToFollow, ShowEncoding) -> {<<"content_type">>, Type}, {<<"revpos">>, RevPos} ], - DigestProp = case base64:encode(Md5) of - <<>> -> []; - Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}] - end, - DataProps = if - not OutputData orelse Data == stub -> - [{<<"length">>, DiskLen}, {<<"stub">>, true}]; - DataToFollow -> - [{<<"length">>, DiskLen}, {<<"follows">>, true}]; - true -> - AttData = case Enc of - gzip -> zlib:gunzip(to_binary(Att)); - identity -> to_binary(Att) - end, - [{<<"data">>, base64:encode(AttData)}] - end, - EncodingProps = if - ShowEncoding andalso Enc /= identity -> - [ - {<<"encoding">>, couch_util:to_binary(Enc)}, - {<<"encoded_length">>, AttLen} - ]; - true -> - [] - end, - HeadersProp = case fetch(headers, Att) of - undefined -> []; - Headers -> [{<<"headers">>, Headers}] - end, + DigestProp = + case base64:encode(Md5) of + <<>> -> []; + Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}] + end, + DataProps = + if + not OutputData orelse Data == stub -> + [{<<"length">>, DiskLen}, {<<"stub">>, true}]; + DataToFollow -> + [{<<"length">>, DiskLen}, {<<"follows">>, true}]; + true -> + AttData = + case Enc of + gzip -> zlib:gunzip(to_binary(Att)); + identity -> to_binary(Att) + end, + [{<<"data">>, base64:encode(AttData)}] + end, + EncodingProps = + if + ShowEncoding andalso Enc /= identity -> + [ + {<<"encoding">>, couch_util:to_binary(Enc)}, + {<<"encoded_length">>, AttLen} + ]; + true -> + [] + end, + HeadersProp = + case fetch(headers, Att) of + undefined -> []; + Headers -> [{<<"headers">>, Headers}] + end, {Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}. - flush(Db, Att) -> flush_data(Db, fetch(data, Att), Att). 
- flush_data(Db, Data, Att) when is_binary(Data) -> couch_db:with_stream(Db, Att, fun(OutputStream) -> couch_stream:write(OutputStream, Data) @@ -545,25 +560,29 @@ flush_data(Db, Fun, Att) when is_function(Fun) -> couch_db:with_stream(Db, Att, fun(OutputStream) -> % Fun(MaxChunkSize, WriterFun) must call WriterFun % once for each chunk of the attachment, - Fun(4096, + Fun( + 4096, % WriterFun({Length, Binary}, State) % WriterFun({0, _Footers}, State) % Called with Length == 0 on the last time. % WriterFun returns NewState. - fun({0, Footers}, _Total) -> - F = mochiweb_headers:from_binary(Footers), - case mochiweb_headers:get_value("Content-MD5", F) of - undefined -> - ok; - Md5 -> - {md5, base64:decode(Md5)} - end; - ({Length, Chunk}, Total0) -> - Total = Total0 + Length, - validate_attachment_size(AttName, Total, MaxAttSize), - couch_stream:write(OutputStream, Chunk), - Total - end, 0) + fun + ({0, Footers}, _Total) -> + F = mochiweb_headers:from_binary(Footers), + case mochiweb_headers:get_value("Content-MD5", F) of + undefined -> + ok; + Md5 -> + {md5, base64:decode(Md5)} + end; + ({Length, Chunk}, Total0) -> + Total = Total0 + Length, + validate_attachment_size(AttName, Total, MaxAttSize), + couch_stream:write(OutputStream, Chunk), + Total + end, + 0 + ) end); AttLen -> validate_attachment_size(AttName, AttLen, MaxAttSize), @@ -600,17 +619,18 @@ flush_data(Db, {stream, StreamEngine}, Att) -> end) end. - write_streamed_attachment(_Stream, _F, 0) -> ok; write_streamed_attachment(_Stream, _F, LenLeft) when LenLeft < 0 -> throw({bad_request, <<"attachment longer than expected">>}); write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 -> - Bin = try read_next_chunk(F, LenLeft) - catch - {mp_parser_died, normal} -> - throw({bad_request, <<"attachment shorter than expected">>}) - end, + Bin = + try + read_next_chunk(F, LenLeft) + catch + {mp_parser_died, normal} -> + throw({bad_request, <<"attachment shorter than expected">>}) + end, ok = couch_stream:write(Stream, Bin), write_streamed_attachment(Stream, F, LenLeft - iolist_size(Bin)). @@ -619,11 +639,9 @@ read_next_chunk(F, _) when is_function(F, 0) -> read_next_chunk(F, LenLeft) when is_function(F, 1) -> F(lists:min([LenLeft, 16#2000])). - foldl(Att, Fun, Acc) -> foldl(fetch(data, Att), Att, Fun, Acc). - foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) -> Fun(Bin, Acc); foldl({stream, StreamEngine}, Att, Fun, Acc) -> @@ -651,54 +669,51 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) -> erlang:demonitor(ParserRef, [flush]) end. - range_foldl(Att, From, To, Fun, Acc) -> {stream, StreamEngine} = fetch(data, Att), couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc). - foldl_decode(Att, Fun, Acc) -> case fetch([data, encoding], Att) of [{stream, StreamEngine}, Enc] -> couch_stream:foldl_decode( - StreamEngine, fetch(md5, Att), Enc, Fun, Acc); + StreamEngine, fetch(md5, Att), Enc, Fun, Acc + ); [Fun2, identity] -> fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc) end. - to_binary(Att) -> to_binary(fetch(data, Att), Att). 
- to_binary(Bin, _Att) when is_binary(Bin) -> Bin; to_binary(Iolist, _Att) when is_list(Iolist) -> iolist_to_binary(Iolist); to_binary({stream, _StreamEngine}, Att) -> iolist_to_binary( - lists:reverse(foldl(Att, fun(Bin,Acc) -> [Bin|Acc] end, [])) + lists:reverse(foldl(Att, fun(Bin, Acc) -> [Bin | Acc] end, [])) ); -to_binary(DataFun, Att) when is_function(DataFun)-> +to_binary(DataFun, Att) when is_function(DataFun) -> Len = fetch(att_len, Att), iolist_to_binary( - lists:reverse(fold_streamed_data( - DataFun, - Len, - fun(Data, Acc) -> [Data | Acc] end, - [] - )) + lists:reverse( + fold_streamed_data( + DataFun, + Len, + fun(Data, Acc) -> [Data | Acc] end, + [] + ) + ) ). - fold_streamed_data(_RcvFun, 0, _Fun, Acc) -> Acc; -fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0-> +fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 -> Bin = RcvFun(), ResultAcc = Fun(Bin, Acc), fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc). - %% Upgrade an attachment record to a property list on demand. This is a one-way %% operation as downgrading potentially truncates fields with important data. -spec upgrade(#att{}) -> attachment(). @@ -712,7 +727,6 @@ upgrade(#att{} = Att) -> upgrade(Att) -> Att. - %% Downgrade is exposed for interactive convenience. In practice, unless done %% manually, upgrades are always one-way. downgrade(#att{} = Att) -> @@ -729,7 +743,6 @@ downgrade(Att) -> encoding = fetch(encoding, Att) }. - upgrade_encoding(true) -> gzip; upgrade_encoding(false) -> identity; upgrade_encoding(Encoding) -> Encoding. @@ -744,9 +757,12 @@ max_attachment_size(MaxAttSizeConfig) -> MaxAttSize when is_list(MaxAttSize) -> try list_to_integer(MaxAttSize) of Result -> Result - catch _:_ -> - couch_log:error("invalid config value for max attachment size: ~p ", [MaxAttSize]), - throw(internal_server_error) + catch + _:_ -> + couch_log:error("invalid config value for max attachment size: ~p ", [ + MaxAttSize + ]), + throw(internal_server_error) end; MaxAttSize when is_integer(MaxAttSize) -> MaxAttSize; @@ -755,14 +771,13 @@ max_attachment_size(MaxAttSizeConfig) -> throw(internal_server_error) end. - -validate_attachment_size(AttName, AttSize, MaxAttSize) - when is_integer(AttSize), AttSize > MaxAttSize -> +validate_attachment_size(AttName, AttSize, MaxAttSize) when + is_integer(AttSize), AttSize > MaxAttSize +-> throw({request_entity_too_large, {attachment, AttName}}); validate_attachment_size(_AttName, _AttSize, _MAxAttSize) -> ok. - open_stream(StreamSrc, Data) -> case couch_db:is_db(StreamSrc) of true -> @@ -776,7 +791,6 @@ open_stream(StreamSrc, Data) -> end end. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -785,28 +799,20 @@ open_stream(StreamSrc, Data) -> %% Test utilities - empty_att() -> new(). - upgraded_empty_att() -> new([{headers, undefined}]). - %% Test groups - attachment_upgrade_test_() -> {"Lazy record upgrade tests", [ {"Existing record fields don't upgrade", - {with, empty_att(), [fun test_non_upgrading_fields/1]} - }, - {"New fields upgrade", - {with, empty_att(), [fun test_upgrading_fields/1]} - } + {with, empty_att(), [fun test_non_upgrading_fields/1]}}, + {"New fields upgrade", {with, empty_att(), [fun test_upgrading_fields/1]}} ]}. - attachment_defaults_test_() -> {"Attachment defaults tests", [ {"Records retain old default values", [ @@ -827,14 +833,13 @@ attachment_field_api_test_() -> fun test_transform/0 ]}. 
- attachment_disk_term_test_() -> BaseAttachment = new([ {name, <<"empty">>}, {type, <<"application/octet-stream">>}, {att_len, 0}, {disk_len, 0}, - {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>}, + {md5, <<212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126>>}, {revpos, 4}, {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}}, {encoding, identity} @@ -843,14 +848,16 @@ attachment_disk_term_test_() -> <<"empty">>, <<"application/octet-stream">>, fake_sp, - 0, 0, 4, - <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>, + 0, + 0, + 4, + <<212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126>>, identity }, Headers = [{<<"X-Foo">>, <<"bar">>}], ExtendedAttachment = store(headers, Headers, BaseAttachment), ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]}, - FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]), + FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd = fake_fd}}}]), {"Disk term tests", [ ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)), ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)), @@ -858,7 +865,6 @@ attachment_disk_term_test_() -> ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm)) ]}. - attachment_json_term_test_() -> Props = [ {<<"content_type">>, <<"application/json">>}, @@ -892,16 +898,13 @@ attachment_json_term_test_() -> ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps)) ]}. - attachment_stub_merge_test_() -> %% Stub merging needs to demonstrate revpos matching, skipping, and missing %% attachment errors. {"Attachment stub merging tests", []}. - %% Test generators - test_non_upgrading_fields(Attachment) -> Pairs = [ {name, "cat.gif"}, @@ -919,8 +922,8 @@ test_non_upgrading_fields(Attachment) -> Updated = store(Field, Value, Attachment), ?assertMatch(#att{}, Updated) end, - Pairs). - + Pairs + ). test_upgrading_fields(Attachment) -> ?assertMatch(#att{}, Attachment), @@ -929,13 +932,11 @@ test_upgrading_fields(Attachment) -> UpdatedHeadersUndefined = store(headers, undefined, Attachment), ?assertMatch(X when is_list(X), UpdatedHeadersUndefined). - test_legacy_defaults(Attachment) -> ?assertEqual(<<>>, fetch(md5, Attachment)), ?assertEqual(0, fetch(revpos, Attachment)), ?assertEqual(identity, fetch(encoding, Attachment)). - test_elided_entries(Attachment) -> ?assertNot(lists:keymember(name, 1, Attachment)), ?assertNot(lists:keymember(type, 1, Attachment)), @@ -943,26 +944,22 @@ test_elided_entries(Attachment) -> ?assertNot(lists:keymember(disk_len, 1, Attachment)), ?assertNot(lists:keymember(data, 1, Attachment)). - test_construction() -> ?assert(new() == new()), Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]), ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)), ?assertEqual(<<"application/qux">>, fetch(type, Initialized)). - test_store_and_fetch() -> Attachment = empty_att(), ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))), ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))). - test_transform() -> Attachment = new([{counter, 0}]), Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment), ?assertEqual(1, fetch(counter, Transformed)). 
- max_attachment_size_test_() -> {"Max attachment size tests", [ ?_assertEqual(infinity, max_attachment_size("infinity")), diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl index c564cee00..f361ab231 100644 --- a/src/couch/src/couch_auth_cache.erl +++ b/src/couch/src/couch_auth_cache.erl @@ -12,7 +12,6 @@ -module(couch_auth_cache). - -export([ get_user_creds/1, get_user_creds/2, @@ -23,35 +22,33 @@ ensure_users_db_exists/0 ]). - -include_lib("couch/include/couch_db.hrl"). -include_lib("couch/include/couch_js_functions.hrl"). - --spec get_user_creds(UserName::string() | binary()) -> - {ok, Credentials::list(), term()} | nil. +-spec get_user_creds(UserName :: string() | binary()) -> + {ok, Credentials :: list(), term()} | nil. get_user_creds(UserName) -> get_user_creds(nil, UserName). --spec get_user_creds(Req::#httpd{} | nil, UserName::string() | binary()) -> - {ok, Credentials::list(), term()} | nil. +-spec get_user_creds(Req :: #httpd{} | nil, UserName :: string() | binary()) -> + {ok, Credentials :: list(), term()} | nil. get_user_creds(Req, UserName) when is_list(UserName) -> get_user_creds(Req, ?l2b(UserName)); - get_user_creds(_Req, UserName) -> - UserCreds = case get_admin(UserName) of - nil -> - get_from_db(UserName); - Props -> - case get_from_db(UserName) of - nil -> - Props; - UserProps when is_list(UserProps) -> - add_roles(Props, couch_util:get_value(<<"roles">>, UserProps)) - end - end, + UserCreds = + case get_admin(UserName) of + nil -> + get_from_db(UserName); + Props -> + case get_from_db(UserName) of + nil -> + Props; + UserProps when is_list(UserProps) -> + add_roles(Props, couch_util:get_value(<<"roles">>, UserProps)) + end + end, validate_user_creds(UserCreds). update_user_creds(_Req, UserDoc, _AuthCtx) -> @@ -69,31 +66,34 @@ get_admin(UserName) when is_binary(UserName) -> get_admin(?b2l(UserName)); get_admin(UserName) when is_list(UserName) -> case config:get("admins", UserName) of - "-hashed-" ++ HashedPwdAndSalt -> - % the name is an admin, now check to see if there is a user doc - % which has a matching name, salt, and password_sha - [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","), - make_admin_doc(HashedPwd, Salt); - "-pbkdf2-" ++ HashedPwdSaltAndIterations -> - [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","), - make_admin_doc(HashedPwd, Salt, Iterations); - _Else -> - nil + "-hashed-" ++ HashedPwdAndSalt -> + % the name is an admin, now check to see if there is a user doc + % which has a matching name, salt, and password_sha + [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","), + make_admin_doc(HashedPwd, Salt); + "-pbkdf2-" ++ HashedPwdSaltAndIterations -> + [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","), + make_admin_doc(HashedPwd, Salt, Iterations); + _Else -> + nil end. make_admin_doc(HashedPwd, Salt) -> - [{<<"roles">>, [<<"_admin">>]}, - {<<"salt">>, ?l2b(Salt)}, - {<<"password_scheme">>, <<"simple">>}, - {<<"password_sha">>, ?l2b(HashedPwd)}]. + [ + {<<"roles">>, [<<"_admin">>]}, + {<<"salt">>, ?l2b(Salt)}, + {<<"password_scheme">>, <<"simple">>}, + {<<"password_sha">>, ?l2b(HashedPwd)} + ]. make_admin_doc(DerivedKey, Salt, Iterations) -> - [{<<"roles">>, [<<"_admin">>]}, - {<<"salt">>, ?l2b(Salt)}, - {<<"iterations">>, list_to_integer(Iterations)}, - {<<"password_scheme">>, <<"pbkdf2">>}, - {<<"derived_key">>, ?l2b(DerivedKey)}]. 
- + [ + {<<"roles">>, [<<"_admin">>]}, + {<<"salt">>, ?l2b(Salt)}, + {<<"iterations">>, list_to_integer(Iterations)}, + {<<"password_scheme">>, <<"pbkdf2">>}, + {<<"derived_key">>, ?l2b(DerivedKey)} + ]. get_from_db(UserName) -> ok = ensure_users_db_exists(), @@ -104,69 +104,69 @@ get_from_db(UserName) -> {DocProps} = couch_doc:to_json_obj(Doc, []), DocProps catch - _:_Error -> - nil + _:_Error -> + nil end end). - validate_user_creds(nil) -> nil; validate_user_creds(UserCreds) -> case couch_util:get_value(<<"_conflicts">>, UserCreds) of - undefined -> - ok; - _ConflictList -> - throw({unauthorized, - <<"User document conflicts must be resolved before the document", - " is used for authentication purposes.">> - }) + undefined -> + ok; + _ConflictList -> + throw( + {unauthorized, + <<"User document conflicts must be resolved before the document", + " is used for authentication purposes.">>} + ) end, {ok, UserCreds, nil}. - users_db() -> DbNameList = config:get("couch_httpd_auth", "authentication_db", "_users"), ?l2b(DbNameList). - ensure_users_db_exists() -> Options = [?ADMIN_CTX, nologifmissing], case couch_db:open(users_db(), Options) of - {ok, Db} -> - ensure_auth_ddoc_exists(Db, <<"_design/_auth">>), - couch_db:close(Db); - _Error -> - {ok, Db} = couch_db:create(users_db(), Options), - ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>), - couch_db:close(Db) + {ok, Db} -> + ensure_auth_ddoc_exists(Db, <<"_design/_auth">>), + couch_db:close(Db); + _Error -> + {ok, Db} = couch_db:create(users_db(), Options), + ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>), + couch_db:close(Db) end, ok. - ensure_auth_ddoc_exists(Db, DDocId) -> case couch_db:open_doc(Db, DDocId) of - {not_found, _Reason} -> - {ok, AuthDesign} = auth_design_doc(DDocId), - {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []); - {ok, Doc} -> - {Props} = couch_doc:to_json_obj(Doc, []), - case couch_util:get_value(<<"validate_doc_update">>, Props, []) of - ?AUTH_DB_DOC_VALIDATE_FUNCTION -> - ok; - _ -> - Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props, - {<<"validate_doc_update">>, - ?AUTH_DB_DOC_VALIDATE_FUNCTION}), - couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), []) - end + {not_found, _Reason} -> + {ok, AuthDesign} = auth_design_doc(DDocId), + {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []); + {ok, Doc} -> + {Props} = couch_doc:to_json_obj(Doc, []), + case couch_util:get_value(<<"validate_doc_update">>, Props, []) of + ?AUTH_DB_DOC_VALIDATE_FUNCTION -> + ok; + _ -> + Props1 = lists:keyreplace( + <<"validate_doc_update">>, + 1, + Props, + {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION} + ), + couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), []) + end end, ok. auth_design_doc(DocId) -> DocProps = [ {<<"_id">>, DocId}, - {<<"language">>,<<"javascript">>}, + {<<"language">>, <<"javascript">>}, {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION} ], {ok, couch_doc:from_json_obj({DocProps})}. diff --git a/src/couch/src/couch_base32.erl b/src/couch/src/couch_base32.erl index d8d754f5e..776fe773d 100644 --- a/src/couch/src/couch_base32.erl +++ b/src/couch/src/couch_base32.erl @@ -16,7 +16,6 @@ -define(SET, <<"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567">>). - -spec encode(binary()) -> binary(). 
encode(Plain) when is_binary(Plain) -> IoList = encode(Plain, 0, byte_size(Plain) * 8, []), @@ -24,54 +23,63 @@ encode(Plain) when is_binary(Plain) -> encode(_Plain, _ByteOffset, 0, Acc) -> Acc; - encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 8 -> <<A:5, B:3>> = binary:part(Plain, ByteOffset, 1), - [<<(binary:at(?SET, A)), - (binary:at(?SET, B bsl 2)), - "======">> | Acc]; - + [<<(binary:at(?SET, A)), (binary:at(?SET, B bsl 2)), "======">> | Acc]; encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 16 -> <<A:5, B:5, C:5, D:1>> = binary:part(Plain, ByteOffset, 2), - [<<(binary:at(?SET, A)), - (binary:at(?SET, B)), - (binary:at(?SET, C)), - (binary:at(?SET, D bsl 4)), - "====">> | Acc]; - + [ + << + (binary:at(?SET, A)), + (binary:at(?SET, B)), + (binary:at(?SET, C)), + (binary:at(?SET, D bsl 4)), + "====" + >> + | Acc + ]; encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 24 -> <<A:5, B:5, C:5, D:5, E:4>> = binary:part(Plain, ByteOffset, 3), - [<<(binary:at(?SET, A)), - (binary:at(?SET, B)), - (binary:at(?SET, C)), - (binary:at(?SET, D)), - (binary:at(?SET, E bsl 1)), - "===">> | Acc]; - + [ + << + (binary:at(?SET, A)), + (binary:at(?SET, B)), + (binary:at(?SET, C)), + (binary:at(?SET, D)), + (binary:at(?SET, E bsl 1)), + "===" + >> + | Acc + ]; encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 32 -> <<A:5, B:5, C:5, D:5, E:5, F:5, G:2>> = binary:part(Plain, ByteOffset, 4), - [<<(binary:at(?SET, A)), - (binary:at(?SET, B)), - (binary:at(?SET, C)), - (binary:at(?SET, D)), - (binary:at(?SET, E)), - (binary:at(?SET, F)), - (binary:at(?SET, G bsl 3)), - "=">> | Acc]; - + [ + << + (binary:at(?SET, A)), + (binary:at(?SET, B)), + (binary:at(?SET, C)), + (binary:at(?SET, D)), + (binary:at(?SET, E)), + (binary:at(?SET, F)), + (binary:at(?SET, G bsl 3)), + "=" + >> + | Acc + ]; encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining >= 40 -> <<A:5, B:5, C:5, D:5, E:5, F:5, G:5, H:5>> = binary:part(Plain, ByteOffset, 5), - Output = <<(binary:at(?SET, A)), - (binary:at(?SET, B)), - (binary:at(?SET, C)), - (binary:at(?SET, D)), - (binary:at(?SET, E)), - (binary:at(?SET, F)), - (binary:at(?SET, G)), - (binary:at(?SET, H))>>, - encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]). - + Output = << + (binary:at(?SET, A)), + (binary:at(?SET, B)), + (binary:at(?SET, C)), + (binary:at(?SET, D)), + (binary:at(?SET, E)), + (binary:at(?SET, F)), + (binary:at(?SET, G)), + (binary:at(?SET, H)) + >>, + encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]). -spec decode(binary()) -> binary(). 
decode(Encoded) when is_binary(Encoded) -> @@ -83,39 +91,60 @@ decode(Encoded, ByteOffset, Acc) when ByteOffset == byte_size(Encoded) -> decode(Encoded, ByteOffset, Acc) -> case binary:part(Encoded, ByteOffset, 8) of <<A:1/binary, B:1/binary, "======">> -> - [<<(find_in_set(A)):5, - (find_in_set(B) bsr 2):3>> | Acc]; + [<<(find_in_set(A)):5, (find_in_set(B) bsr 2):3>> | Acc]; <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, "====">> -> - [<<(find_in_set(A)):5, - (find_in_set(B)):5, - (find_in_set(C)):5, - (find_in_set(D) bsr 4):1>> | Acc]; + [ + << + (find_in_set(A)):5, + (find_in_set(B)):5, + (find_in_set(C)):5, + (find_in_set(D) bsr 4):1 + >> + | Acc + ]; <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, "===">> -> - [<<(find_in_set(A)):5, - (find_in_set(B)):5, - (find_in_set(C)):5, - (find_in_set(D)):5, - (find_in_set(E) bsr 1):4>> | Acc]; - <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, - E:1/binary, F:1/binary, G:1/binary, "=">> -> - [<<(find_in_set(A)):5, - (find_in_set(B)):5, - (find_in_set(C)):5, - (find_in_set(D)):5, - (find_in_set(E)):5, - (find_in_set(F)):5, - (find_in_set(G) bsr 3):2>> | Acc]; - <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, - E:1/binary, F:1/binary, G:1/binary, H:1/binary>> -> - decode(Encoded, ByteOffset + 8, - [<<(find_in_set(A)):5, - (find_in_set(B)):5, - (find_in_set(C)):5, - (find_in_set(D)):5, - (find_in_set(E)):5, - (find_in_set(F)):5, - (find_in_set(G)):5, - (find_in_set(H)):5>> | Acc]) + [ + << + (find_in_set(A)):5, + (find_in_set(B)):5, + (find_in_set(C)):5, + (find_in_set(D)):5, + (find_in_set(E) bsr 1):4 + >> + | Acc + ]; + <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary, "=">> -> + [ + << + (find_in_set(A)):5, + (find_in_set(B)):5, + (find_in_set(C)):5, + (find_in_set(D)):5, + (find_in_set(E)):5, + (find_in_set(F)):5, + (find_in_set(G) bsr 3):2 + >> + | Acc + ]; + <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary, + H:1/binary>> -> + decode( + Encoded, + ByteOffset + 8, + [ + << + (find_in_set(A)):5, + (find_in_set(B)):5, + (find_in_set(C)):5, + (find_in_set(D)):5, + (find_in_set(E)):5, + (find_in_set(F)):5, + (find_in_set(G)):5, + (find_in_set(H)):5 + >> + | Acc + ] + ) end. find_in_set(Char) -> diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl index 48e751a82..7d2390556 100644 --- a/src/couch/src/couch_bt_engine.erl +++ b/src/couch/src/couch_bt_engine.erl @@ -80,12 +80,10 @@ finish_compaction/4 ]). - -export([ init_state/4 ]). - -export([ id_tree_split/1, id_tree_join/2, @@ -105,7 +103,6 @@ purge_seq_tree_join/2 ]). - % Used by the compactor -export([ update_header/2, @@ -113,12 +110,10 @@ copy_props/2 ]). - -include_lib("kernel/include/file.hrl"). -include_lib("couch/include/couch_db.hrl"). -include("couch_bt_engine.hrl"). - exists(FilePath) -> case is_file(FilePath) of true -> @@ -127,7 +122,6 @@ exists(FilePath) -> is_file(FilePath ++ ".compact") end. - delete(RootDir, FilePath, Async) -> %% Delete any leftover compaction files. If we don't do this a %% subsequent request for this DB will try to open them to use @@ -137,70 +131,69 @@ delete(RootDir, FilePath, Async) -> % Delete the actual database file couch_file:delete(RootDir, FilePath, Async). - delete_compaction_files(RootDir, FilePath, DelOpts) -> - lists:foreach(fun(Ext) -> - couch_file:delete(RootDir, FilePath ++ Ext, DelOpts) - end, [".compact", ".compact.data", ".compact.meta"]). 
- + lists:foreach( + fun(Ext) -> + couch_file:delete(RootDir, FilePath ++ Ext, DelOpts) + end, + [".compact", ".compact.data", ".compact.meta"] + ). init(FilePath, Options) -> {ok, Fd} = open_db_file(FilePath, Options), - Header = case lists:member(create, Options) of - true -> - delete_compaction_files(FilePath), - Header0 = couch_bt_engine_header:new(), - Header1 = init_set_props(Fd, Header0, Options), - ok = couch_file:write_header(Fd, Header1), - Header1; - false -> - case couch_file:read_header(Fd) of - {ok, Header0} -> - Header0; - no_valid_header -> - delete_compaction_files(FilePath), - Header0 = couch_bt_engine_header:new(), - ok = couch_file:write_header(Fd, Header0), - Header0 - end - end, + Header = + case lists:member(create, Options) of + true -> + delete_compaction_files(FilePath), + Header0 = couch_bt_engine_header:new(), + Header1 = init_set_props(Fd, Header0, Options), + ok = couch_file:write_header(Fd, Header1), + Header1; + false -> + case couch_file:read_header(Fd) of + {ok, Header0} -> + Header0; + no_valid_header -> + delete_compaction_files(FilePath), + Header0 = couch_bt_engine_header:new(), + ok = couch_file:write_header(Fd, Header0), + Header0 + end + end, {ok, init_state(FilePath, Fd, Header, Options)}. - terminate(_Reason, St) -> % If the reason we died is because our fd disappeared % then we don't need to try closing it again. Ref = St#st.fd_monitor, - if Ref == closed -> ok; true -> - ok = couch_file:close(St#st.fd), - receive - {'DOWN', Ref, _, _, _} -> - ok + if + Ref == closed -> + ok; + true -> + ok = couch_file:close(St#st.fd), + receive + {'DOWN', Ref, _, _, _} -> + ok after 500 -> ok - end + end end, couch_util:shutdown_sync(St#st.fd), ok. - handle_db_updater_call(Msg, St) -> {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}. - -handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) -> - {stop, normal, St#st{fd=undefined, fd_monitor=closed}}. - +handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor = Ref} = St) -> + {stop, normal, St#st{fd = undefined, fd_monitor = closed}}. incref(St) -> {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}. - decref(St) -> true = erlang:demonitor(St#st.fd_monitor, [flush]), ok. - monitored_by(St) -> case erlang:process_info(St#st.fd, monitored_by) of {monitored_by, Pids} -> @@ -209,33 +202,26 @@ monitored_by(St) -> [] end. - last_activity(#st{fd = Fd}) -> couch_file:last_read(Fd). - get_compacted_seq(#st{header = Header}) -> couch_bt_engine_header:get(Header, compacted_seq). - get_del_doc_count(#st{} = St) -> {ok, Reds} = couch_btree:full_reduce(St#st.id_tree), element(2, Reds). - get_disk_version(#st{header = Header}) -> couch_bt_engine_header:get(Header, disk_version). - get_doc_count(#st{} = St) -> {ok, Reds} = couch_btree:full_reduce(St#st.id_tree), element(1, Reds). - get_epochs(#st{header = Header}) -> couch_bt_engine_header:get(Header, epochs). - get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) -> Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) -> {stop, PurgeSeq} @@ -243,7 +229,6 @@ get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) -> {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]), PurgeSeq. - get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) -> Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) -> {stop, PurgeSeq} @@ -251,27 +236,25 @@ get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) -> {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []), PurgeSeq. 
- get_purge_infos_limit(#st{header = Header}) -> couch_bt_engine_header:get(Header, purge_infos_limit). - get_revs_limit(#st{header = Header}) -> couch_bt_engine_header:get(Header, revs_limit). - get_size_info(#st{} = St) -> {ok, FileSize} = couch_file:bytes(St#st.fd), {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree), SizeInfo0 = element(3, DbReduction), - SizeInfo = case SizeInfo0 of - SI when is_record(SI, size_info) -> - SI; - {AS, ES} -> - #size_info{active=AS, external=ES}; - AS -> - #size_info{active=AS} - end, + SizeInfo = + case SizeInfo0 of + SI when is_record(SI, size_info) -> + SI; + {AS, ES} -> + #size_info{active = AS, external = ES}; + AS -> + #size_info{active = AS} + end, ActiveSize = active_size(St, SizeInfo), ExternalSize = SizeInfo#size_info.external, [ @@ -280,7 +263,6 @@ get_size_info(#st{} = St) -> {file, FileSize} ]. - partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) -> case couch_partition:is_member(Key, Partition) of true -> @@ -288,22 +270,18 @@ partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, Si false -> {ok, {Partition, DCAcc, DDCAcc, SizesAcc}} end; - partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) -> InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition), Deleted = FDI#full_doc_info.deleted, case {InPartition, Deleted} of {true, true} -> - {ok, {Partition, DCAcc, DDCAcc + 1, - reduce_sizes(FDI#full_doc_info.sizes, Acc)}}; + {ok, {Partition, DCAcc, DDCAcc + 1, reduce_sizes(FDI#full_doc_info.sizes, Acc)}}; {true, false} -> - {ok, {Partition, DCAcc + 1, DDCAcc, - reduce_sizes(FDI#full_doc_info.sizes, Acc)}}; + {ok, {Partition, DCAcc + 1, DDCAcc, reduce_sizes(FDI#full_doc_info.sizes, Acc)}}; {false, _} -> {ok, {Partition, DCAcc, DDCAcc, Acc}} end. - get_partition_info(#st{} = St, Partition) -> StartKey = couch_partition:start_key(Partition), EndKey = couch_partition:end_key(Partition), @@ -322,7 +300,6 @@ get_partition_info(#st{} = St, Partition) -> ]} ]. - get_security(#st{header = Header} = St) -> case couch_bt_engine_header:get(Header, security_ptr) of undefined -> @@ -332,7 +309,6 @@ get_security(#st{header = Header} = St) -> SecProps end. - get_props(#st{header = Header} = St) -> case couch_bt_engine_header:get(Header, props_ptr) of undefined -> @@ -342,15 +318,12 @@ get_props(#st{header = Header} = St) -> Props end. - get_update_seq(#st{header = Header}) -> couch_bt_engine_header:get(Header, update_seq). - get_uuid(#st{header = Header}) -> couch_bt_engine_header:get(Header, uuid). - set_revs_limit(#st{header = Header} = St, RevsLimit) -> NewSt = St#st{ header = couch_bt_engine_header:set(Header, [ @@ -360,7 +333,6 @@ set_revs_limit(#st{header = Header} = St, RevsLimit) -> }, {ok, increment_update_seq(NewSt)}. - set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) -> NewSt = St#st{ header = couch_bt_engine_header:set(Header, [ @@ -370,7 +342,6 @@ set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) -> }, {ok, increment_update_seq(NewSt)}. - set_security(#st{header = Header} = St, NewSecurity) -> Options = [{compression, St#st.compression}], {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options), @@ -382,7 +353,6 @@ set_security(#st{header = Header} = St, NewSecurity) -> }, {ok, increment_update_seq(NewSt)}. 
- set_props(#st{header = Header} = St, Props) -> Options = [{compression, St#st.compression}], {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options), @@ -394,22 +364,25 @@ set_props(#st{header = Header} = St, Props) -> }, {ok, increment_update_seq(NewSt)}. - open_docs(#st{} = St, DocIds) -> Results = couch_btree:lookup(St#st.id_tree, DocIds), - lists:map(fun - ({ok, FDI}) -> FDI; - (not_found) -> not_found - end, Results). - + lists:map( + fun + ({ok, FDI}) -> FDI; + (not_found) -> not_found + end, + Results + ). open_local_docs(#st{} = St, DocIds) -> Results = couch_btree:lookup(St#st.local_tree, DocIds), - lists:map(fun - ({ok, Doc}) -> Doc; - (not_found) -> not_found - end, Results). - + lists:map( + fun + ({ok, Doc}) -> Doc; + (not_found) -> not_found + end, + Results + ). read_doc_body(#st{} = St, #doc{} = Doc) -> {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body), @@ -418,14 +391,15 @@ read_doc_body(#st{} = St, #doc{} = Doc) -> atts = Atts }. - load_purge_infos(St, UUIDs) -> Results = couch_btree:lookup(St#st.purge_tree, UUIDs), - lists:map(fun - ({ok, Info}) -> Info; - (not_found) -> not_found - end, Results). - + lists:map( + fun + ({ok, Info}) -> Info; + (not_found) -> not_found + end, + Results + ). serialize_doc(#st{} = St, #doc{} = Doc) -> Compress = fun(Term) -> @@ -449,7 +423,6 @@ serialize_doc(#st{} = St, #doc{} = Doc) -> meta = [{comp_body, Body} | Doc#doc.meta] }. - write_doc_body(St, #doc{} = Doc) -> #st{ fd = Fd @@ -457,46 +430,57 @@ write_doc_body(St, #doc{} = Doc) -> {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body), {ok, Doc#doc{body = Ptr}, Written}. - write_doc_infos(#st{} = St, Pairs, LocalDocs) -> #st{ id_tree = IdTree, seq_tree = SeqTree, local_tree = LocalTree } = St, - FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) -> - {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc, - case {OldFDI, NewFDI} of - {not_found, #full_doc_info{}} -> - {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc}; - {#full_doc_info{id = Id}, #full_doc_info{id = Id}} -> - NewAddAcc = [NewFDI | AddAcc], - NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc], - {NewAddAcc, RemIdsAcc, NewRemSeqsAcc}; - {#full_doc_info{id = Id}, not_found} -> - NewRemIdsAcc = [Id | RemIdsAcc], - NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc], - {AddAcc, NewRemIdsAcc, NewRemSeqsAcc} - end - end, {[], [], []}, Pairs), + FinalAcc = lists:foldl( + fun({OldFDI, NewFDI}, Acc) -> + {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc, + case {OldFDI, NewFDI} of + {not_found, #full_doc_info{}} -> + {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc}; + {#full_doc_info{id = Id}, #full_doc_info{id = Id}} -> + NewAddAcc = [NewFDI | AddAcc], + NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc], + {NewAddAcc, RemIdsAcc, NewRemSeqsAcc}; + {#full_doc_info{id = Id}, not_found} -> + NewRemIdsAcc = [Id | RemIdsAcc], + NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc], + {AddAcc, NewRemIdsAcc, NewRemSeqsAcc} + end + end, + {[], [], []}, + Pairs + ), {Add, RemIds, RemSeqs} = FinalAcc, {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds), {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs), - {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) -> - case Doc#doc.deleted of - true -> - {AddAcc, [Doc#doc.id | RemAcc]}; - false -> - {[Doc | AddAcc], RemAcc} - end - end, {[], []}, LocalDocs), + {AddLDocs, RemLDocIds} = lists:foldl( + fun(Doc, {AddAcc, RemAcc}) -> + case Doc#doc.deleted of + true -> + {AddAcc, [Doc#doc.id | RemAcc]}; + false -> + {[Doc | 
AddAcc], RemAcc} + end + end, + {[], []}, + LocalDocs + ), {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds), - NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) -> - erlang:max(Seq, Acc) - end, get_update_seq(St), Add), + NewUpdateSeq = lists:foldl( + fun(#full_doc_info{update_seq = Seq}, Acc) -> + erlang:max(Seq, Acc) + end, + get_update_seq(St), + Add + ), NewHeader = couch_bt_engine_header:set(St#st.header, [ {update_seq, NewUpdateSeq} @@ -510,7 +494,6 @@ write_doc_infos(#st{} = St, Pairs, LocalDocs) -> needs_commit = true }}. - purge_docs(#st{} = St, Pairs, PurgeInfos) -> #st{ id_tree = IdTree, @@ -529,10 +512,11 @@ purge_docs(#st{} = St, Pairs, PurgeInfos) -> % We bump NewUpdateSeq because we have to ensure that % indexers see that they need to process the new purge % information. - UpdateSeq = case NewSeq == CurrSeq of - true -> CurrSeq + 1; - false -> NewSeq - end, + UpdateSeq = + case NewSeq == CurrSeq of + true -> CurrSeq + 1; + false -> NewSeq + end, Header = couch_bt_engine_header:set(St#st.header, [ {update_seq, UpdateSeq} ]), @@ -550,7 +534,6 @@ purge_docs(#st{} = St, Pairs, PurgeInfos) -> needs_commit = true }}. - copy_purge_infos(#st{} = St, PurgeInfos) -> #st{ purge_tree = PurgeTree, @@ -559,12 +542,11 @@ copy_purge_infos(#st{} = St, PurgeInfos) -> {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos), {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos), {ok, St#st{ - purge_tree = PurgeTree2, - purge_seq_tree = PurgeSeqTree2, - needs_commit = true + purge_tree = PurgeTree2, + purge_seq_tree = PurgeSeqTree2, + needs_commit = true }}. - commit_data(St) -> #st{ fd = Fd, @@ -587,32 +569,26 @@ commit_data(St) -> {ok, St} end. - open_write_stream(#st{} = St, Options) -> couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options). - open_read_stream(#st{} = St, StreamSt) -> {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}. - is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) -> St#st.fd == Fd; is_active_stream(_, _) -> false. - fold_docs(St, UserFun, UserAcc, Options) -> fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options). - fold_local_docs(St, UserFun, UserAcc, Options) -> case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc}; {ok, FinalAcc} -> {ok, FinalAcc} end. - fold_changes(St, SinceSeq, UserFun, UserAcc, Options) -> Fun = fun drop_reductions/4, InAcc = {UserFun, UserAcc}, @@ -621,13 +597,13 @@ fold_changes(St, SinceSeq, UserFun, UserAcc, Options) -> {_, FinalUserAcc} = OutAcc, {ok, FinalUserAcc}. - fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) -> PurgeSeqTree = St#st.purge_seq_tree, StartSeq = StartSeq0 + 1, MinSeq = get_oldest_purge_seq(St), - if MinSeq =< StartSeq -> ok; true -> - erlang:error({invalid_start_purge_seq, StartSeq0}) + if + MinSeq =< StartSeq -> ok; + true -> erlang:error({invalid_start_purge_seq, StartSeq0}) end, Wrapper = fun(Info, _Reds, UAcc) -> UserFun(Info, UAcc) @@ -636,7 +612,6 @@ fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) -> {ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts), {ok, OutAcc}. - count_changes_since(St, SinceSeq) -> BTree = St#st.seq_tree, FoldFun = fun(_SeqStart, PartialReds, 0) -> @@ -646,13 +621,11 @@ count_changes_since(St, SinceSeq) -> {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts), Changes. 
- start_compaction(St, DbName, Options, Parent) -> Args = [St, DbName, Options, Parent], Pid = spawn_link(couch_bt_engine_compactor, start, Args), {ok, St, Pid}. - finish_compaction(OldState, DbName, Options, CompactFilePath) -> {ok, NewState1} = ?MODULE:init(CompactFilePath, Options), OldSeq = get_update_seq(OldState), @@ -661,15 +634,16 @@ finish_compaction(OldState, DbName, Options, CompactFilePath) -> true -> finish_compaction_int(OldState, NewState1); false -> - couch_log:info("Compaction file still behind main file " - "(update seq=~p. compact update seq=~p). Retrying.", - [OldSeq, NewSeq]), + couch_log:info( + "Compaction file still behind main file " + "(update seq=~p. compact update seq=~p). Retrying.", + [OldSeq, NewSeq] + ), ok = decref(NewState1), start_compaction(OldState, DbName, Options, self()) end. - -id_tree_split(#full_doc_info{}=Info) -> +id_tree_split(#full_doc_info{} = Info) -> #full_doc_info{ id = Id, update_seq = Seq, @@ -679,11 +653,9 @@ id_tree_split(#full_doc_info{}=Info) -> } = Info, {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}. - id_tree_join(Id, {HighSeq, Deleted, DiskTree}) -> % Handle old formats before data_size was added id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree}); - id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) -> #full_doc_info{ id = Id, @@ -693,29 +665,35 @@ id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) -> rev_tree = rev_tree(DiskTree) }. - id_tree_reduce(reduce, FullDocInfos) -> - lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) -> - Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes), - case Info#full_doc_info.deleted of - true -> - {NotDeleted, Deleted + 1, Sizes2}; - false -> - {NotDeleted + 1, Deleted, Sizes2} - end - end, {0, 0, #size_info{}}, FullDocInfos); + lists:foldl( + fun(Info, {NotDeleted, Deleted, Sizes}) -> + Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes), + case Info#full_doc_info.deleted of + true -> + {NotDeleted, Deleted + 1, Sizes2}; + false -> + {NotDeleted + 1, Deleted, Sizes2} + end + end, + {0, 0, #size_info{}}, + FullDocInfos + ); id_tree_reduce(rereduce, Reds) -> - lists:foldl(fun - ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) -> - % pre 1.2 format, will be upgraded on compaction - {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil}; - ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) -> - AccSizes2 = reduce_sizes(AccSizes, Sizes), - {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2} - end, {0, 0, #size_info{}}, Reds). - - -seq_tree_split(#full_doc_info{}=Info) -> + lists:foldl( + fun + ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) -> + % pre 1.2 format, will be upgraded on compaction + {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil}; + ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) -> + AccSizes2 = reduce_sizes(AccSizes, Sizes), + {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2} + end, + {0, 0, #size_info{}}, + Reds + ). + +seq_tree_split(#full_doc_info{} = Info) -> #full_doc_info{ id = Id, update_seq = Seq, @@ -725,10 +703,8 @@ seq_tree_split(#full_doc_info{}=Info) -> } = Info, {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}. 
- seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) -> seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree}); - seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) -> #full_doc_info{ id = Id, @@ -737,37 +713,39 @@ seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) -> sizes = join_sizes(Sizes), rev_tree = rev_tree(DiskTree) }; - seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) -> % Older versions stored #doc_info records in the seq_tree. % Compact to upgrade. - Revs = lists:map(fun({Rev, Seq, Bp}) -> - #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp} - end, RevInfos), - DeletedRevs = lists:map(fun({Rev, Seq, Bp}) -> - #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp} - end, DeletedRevInfos), + Revs = lists:map( + fun({Rev, Seq, Bp}) -> + #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp} + end, + RevInfos + ), + DeletedRevs = lists:map( + fun({Rev, Seq, Bp}) -> + #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp} + end, + DeletedRevInfos + ), #doc_info{ id = Id, high_seq = KeySeq, revs = Revs ++ DeletedRevs }. - seq_tree_reduce(reduce, DocInfos) -> % count the number of documents length(DocInfos); seq_tree_reduce(rereduce, Reds) -> lists:sum(Reds). - local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) -> #doc{ id = Id, body = BodyData } = Doc, {Id, {binary_to_integer(Rev), BodyData}}; - local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) -> #doc{ id = Id, @@ -775,14 +753,12 @@ local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) -> } = Doc, {Id, {Rev, BodyData}}. - local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) -> #doc{ id = Id, revs = {0, [Rev]}, body = BodyData }; - local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) -> #doc{ id = Id, @@ -790,30 +766,24 @@ local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) -> body = BodyData }. - purge_tree_split({PurgeSeq, UUID, DocId, Revs}) -> {UUID, {PurgeSeq, DocId, Revs}}. - purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) -> {PurgeSeq, UUID, DocId, Revs}. - purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) -> {PurgeSeq, {UUID, DocId, Revs}}. - purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) -> {PurgeSeq, UUID, DocId, Revs}. - purge_tree_reduce(reduce, IdRevs) -> % count the number of purge requests length(IdRevs); purge_tree_reduce(rereduce, Reds) -> lists:sum(Reds). - set_update_seq(#st{header = Header} = St, UpdateSeq) -> {ok, St#st{ header = couch_bt_engine_header:set(Header, [ @@ -822,7 +792,6 @@ set_update_seq(#st{header = Header} = St, UpdateSeq) -> needs_commit = true }}. - copy_security(#st{header = Header} = St, SecProps) -> Options = [{compression, St#st.compression}], {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options), @@ -833,7 +802,6 @@ copy_security(#st{header = Header} = St, SecProps) -> needs_commit = true }}. - copy_props(#st{header = Header} = St, Props) -> Options = [{compression, St#st.compression}], {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options), @@ -844,7 +812,6 @@ copy_props(#st{header = Header} = St, Props) -> needs_commit = true }}. - open_db_file(FilePath, Options) -> case couch_file:open(FilePath, Options) of {ok, Fd} -> @@ -866,7 +833,6 @@ open_db_file(FilePath, Options) -> throw(Error) end. 
- init_state(FilePath, Fd, Header0, Options) -> ok = couch_file:sync(Fd), @@ -878,26 +844,26 @@ init_state(FilePath, Fd, Header0, Options) -> IdTreeState = couch_bt_engine_header:id_tree_state(Header), {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [ - {split, fun ?MODULE:id_tree_split/1}, - {join, fun ?MODULE:id_tree_join/2}, - {reduce, fun ?MODULE:id_tree_reduce/2}, - {compression, Compression} - ]), + {split, fun ?MODULE:id_tree_split/1}, + {join, fun ?MODULE:id_tree_join/2}, + {reduce, fun ?MODULE:id_tree_reduce/2}, + {compression, Compression} + ]), SeqTreeState = couch_bt_engine_header:seq_tree_state(Header), {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [ - {split, fun ?MODULE:seq_tree_split/1}, - {join, fun ?MODULE:seq_tree_join/2}, - {reduce, fun ?MODULE:seq_tree_reduce/2}, - {compression, Compression} - ]), + {split, fun ?MODULE:seq_tree_split/1}, + {join, fun ?MODULE:seq_tree_join/2}, + {reduce, fun ?MODULE:seq_tree_reduce/2}, + {compression, Compression} + ]), LocalTreeState = couch_bt_engine_header:local_tree_state(Header), {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [ - {split, fun ?MODULE:local_tree_split/1}, - {join, fun ?MODULE:local_tree_join/2}, - {compression, Compression} - ]), + {split, fun ?MODULE:local_tree_split/1}, + {join, fun ?MODULE:local_tree_join/2}, + {compression, Compression} + ]), PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header), {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [ @@ -940,7 +906,6 @@ init_state(FilePath, Fd, Header0, Options) -> St end. - update_header(St, Header) -> couch_bt_engine_header:set(Header, [ {seq_tree_state, couch_btree:get_state(St#st.seq_tree)}, @@ -950,7 +915,6 @@ update_header(St, Header) -> {purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)} ]). - increment_update_seq(#st{header = Header} = St) -> UpdateSeq = couch_bt_engine_header:get(Header, update_seq), St#st{ @@ -959,7 +923,6 @@ increment_update_seq(#st{header = Header} = St) -> ]) }. - set_default_security_object(Fd, Header, Compression, Options) -> case couch_bt_engine_header:get(Header, security_ptr) of Pointer when is_integer(Pointer) -> @@ -971,7 +934,6 @@ set_default_security_object(Fd, Header, Compression, Options) -> couch_bt_engine_header:set(Header, security_ptr, Ptr) end. 
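A note on the `init_state/4` hunks above: each btree is opened with `{split, ...}` and `{join, ...}` callbacks that must be exact inverses, since `split` produces the `{Key, Value}` term written to disk and `join` rebuilds the in-memory record. The callbacks are given as external `fun ?MODULE:f/1` references rather than local funs, presumably so they keep resolving correctly across hot code upgrades. A tiny sketch of the inverse contract, using a hypothetical `{doc, ...}` tuple rather than the real `#full_doc_info{}`:

```
%% Sketch of the split/join contract: Join applied to Split's output
%% must reproduce the original term.
Split = fun({doc, Id, Seq, Del}) -> {Id, {Seq, Del}} end,
Join = fun(Id, {Seq, Del}) -> {doc, Id, Seq, Del} end,
{Key, Val} = Split({doc, <<"a">>, 7, false}),
{doc, <<"a">>, 7, false} = Join(Key, Val).
```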
- % This function is here, and not in couch_bt_engine_header % because it requires modifying file contents upgrade_purge_info(Fd, Header) -> @@ -980,24 +942,32 @@ upgrade_purge_info(Fd, Header) -> Header; Ptr when is_tuple(Ptr) -> Header; - PurgeSeq when is_integer(PurgeSeq)-> + PurgeSeq when is_integer(PurgeSeq) -> % Pointer to old purged ids/revs is in purge_seq_tree_state Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state), case Ptr of nil -> PTS = couch_bt_engine_header:purge_tree_state(Header), - PurgeTreeSt = case PTS of 0 -> nil; Else -> Else end, + PurgeTreeSt = + case PTS of + 0 -> nil; + Else -> Else + end, couch_bt_engine_header:set(Header, [ {purge_tree_state, PurgeTreeSt} ]); _ -> {ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr), - {Infos, _} = lists:foldl(fun({Id, Revs}, {InfoAcc, PSeq}) -> - Info = {PSeq, couch_uuids:random(), Id, Revs}, - {[Info | InfoAcc], PSeq + 1} - end, {[], PurgeSeq}, PurgedIdsRevs), + {Infos, _} = lists:foldl( + fun({Id, Revs}, {InfoAcc, PSeq}) -> + Info = {PSeq, couch_uuids:random(), Id, Revs}, + {[Info | InfoAcc], PSeq + 1} + end, + {[], PurgeSeq}, + PurgedIdsRevs + ), {ok, PurgeTree} = couch_btree:open(nil, Fd, [ {split, fun ?MODULE:purge_tree_split/1}, @@ -1022,7 +992,6 @@ upgrade_purge_info(Fd, Header) -> end end. - init_set_props(Fd, Header, Options) -> case couch_util:get_value(props, Options) of undefined -> @@ -1034,70 +1003,70 @@ init_set_props(Fd, Header, Options) -> couch_bt_engine_header:set(Header, props_ptr, Ptr) end. - delete_compaction_files(FilePath) -> RootDir = config:get("couchdb", "database_dir", "."), DelOpts = [{context, compaction}], delete_compaction_files(RootDir, FilePath, DelOpts). - rev_tree(DiskTree) -> - couch_key_tree:map(fun - (_RevId, {Del, Ptr, Seq}) -> - #leaf{ - deleted = ?i2b(Del), - ptr = Ptr, - seq = Seq - }; - (_RevId, {Del, Ptr, Seq, Size}) -> - #leaf{ - deleted = ?i2b(Del), - ptr = Ptr, - seq = Seq, - sizes = couch_db_updater:upgrade_sizes(Size) - }; - (_RevId, {Del, Ptr, Seq, Sizes, Atts}) -> - #leaf{ - deleted = ?i2b(Del), - ptr = Ptr, - seq = Seq, - sizes = couch_db_updater:upgrade_sizes(Sizes), - atts = Atts - }; - (_RevId, ?REV_MISSING) -> - ?REV_MISSING - end, DiskTree). - + couch_key_tree:map( + fun + (_RevId, {Del, Ptr, Seq}) -> + #leaf{ + deleted = ?i2b(Del), + ptr = Ptr, + seq = Seq + }; + (_RevId, {Del, Ptr, Seq, Size}) -> + #leaf{ + deleted = ?i2b(Del), + ptr = Ptr, + seq = Seq, + sizes = couch_db_updater:upgrade_sizes(Size) + }; + (_RevId, {Del, Ptr, Seq, Sizes, Atts}) -> + #leaf{ + deleted = ?i2b(Del), + ptr = Ptr, + seq = Seq, + sizes = couch_db_updater:upgrade_sizes(Sizes), + atts = Atts + }; + (_RevId, ?REV_MISSING) -> + ?REV_MISSING + end, + DiskTree + ). disk_tree(RevTree) -> - couch_key_tree:map(fun - (_RevId, ?REV_MISSING) -> - ?REV_MISSING; - (_RevId, #leaf{} = Leaf) -> - #leaf{ - deleted = Del, - ptr = Ptr, - seq = Seq, - sizes = Sizes, - atts = Atts - } = Leaf, - {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts} - end, RevTree). - - -split_sizes(#size_info{}=SI) -> + couch_key_tree:map( + fun + (_RevId, ?REV_MISSING) -> + ?REV_MISSING; + (_RevId, #leaf{} = Leaf) -> + #leaf{ + deleted = Del, + ptr = Ptr, + seq = Seq, + sizes = Sizes, + atts = Atts + } = Leaf, + {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts} + end, + RevTree + ). + +split_sizes(#size_info{} = SI) -> {SI#size_info.active, SI#size_info.external}. - join_sizes({Active, External}) when is_integer(Active), is_integer(External) -> - #size_info{active=Active, external=External}. 
- + #size_info{active = Active, external = External}. reduce_sizes(nil, _) -> nil; reduce_sizes(_, nil) -> nil; -reduce_sizes(#size_info{}=S1, #size_info{}=S2) -> +reduce_sizes(#size_info{} = S1, #size_info{} = S2) -> #size_info{ active = S1#size_info.active + S2#size_info.active, external = S1#size_info.external + S2#size_info.external @@ -1107,7 +1076,6 @@ reduce_sizes(S1, S2) -> US2 = couch_db_updater:upgrade_sizes(S2), reduce_sizes(US1, US2). - active_size(#st{} = St, #size_info{} = SI) -> Trees = [ St#st.id_tree, @@ -1116,27 +1084,32 @@ active_size(#st{} = St, #size_info{} = SI) -> St#st.purge_tree, St#st.purge_seq_tree ], - lists:foldl(fun(T, Acc) -> - case couch_btree:size(T) of - _ when Acc == null -> - null; - nil -> - null; - Size -> - Acc + Size - end - end, SI#size_info.active, Trees). - + lists:foldl( + fun(T, Acc) -> + case couch_btree:size(T) of + _ when Acc == null -> + null; + nil -> + null; + Size -> + Acc + Size + end + end, + SI#size_info.active, + Trees + ). fold_docs_int(St, Tree, UserFun, UserAcc, Options) -> - Fun = case lists:member(include_deleted, Options) of - true -> fun include_deleted/4; - false -> fun skip_deleted/4 - end, - RedFun = case lists:member(include_reductions, Options) of - true -> fun include_reductions/4; - false -> fun drop_reductions/4 - end, + Fun = + case lists:member(include_deleted, Options) of + true -> fun include_deleted/4; + false -> fun skip_deleted/4 + end, + RedFun = + case lists:member(include_reductions, Options) of + true -> fun include_reductions/4; + false -> fun drop_reductions/4 + end, InAcc = {RedFun, {UserFun, UserAcc}}, {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options), {_, {_, FinalUserAcc}} = OutAcc, @@ -1149,12 +1122,10 @@ fold_docs_int(St, Tree, UserFun, UserAcc, Options) -> {ok, FinalUserAcc} end. - include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) -> {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc), {Go, {UserFun, NewUserAcc}}. - % First element of the reductions is the total % number of undeleted documents. skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) -> @@ -1165,27 +1136,23 @@ skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) -> {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc), {Go, {UserFun, NewUserAcc}}. - include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) -> {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc), {Go, {UserFun, NewUserAcc}}; include_reductions(_, _, _, Acc) -> {ok, Acc}. - drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) -> {Go, NewUserAcc} = UserFun(FDI, UserAcc), {Go, {UserFun, NewUserAcc}}; drop_reductions(_, _, _, Acc) -> {ok, Acc}. - fold_docs_reduce_to_count(Reds) -> RedFun = fun id_tree_reduce/2, FinalRed = couch_btree:final_reduce(RedFun, Reds), element(1, FinalRed). - finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) -> #st{ filepath = FilePath, @@ -1233,10 +1200,11 @@ finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) -> decref(OldSt), % And return our finished new state - {ok, NewSt2#st{ - filepath = FilePath - }, undefined}. - + {ok, + NewSt2#st{ + filepath = FilePath + }, + undefined}. is_file(Path) -> case file:read_file_info(Path, [raw]) of diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl index 3e356e2e3..8ed55b5c3 100644 --- a/src/couch/src/couch_bt_engine_compactor.erl +++ b/src/couch/src/couch_bt_engine_compactor.erl @@ -12,16 +12,13 @@ -module(couch_bt_engine_compactor). - -export([ start/4 ]). - -include_lib("couch/include/couch_db.hrl"). -include("couch_bt_engine.hrl"). 
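The `upgrade_purge_info/2` hunk a little further up converts the pre-clustered-purge on-disk format, where the header held a bare integer purge seq plus a pointer to a flat `[{DocId, Revs}]` list, into the newer per-request `{PurgeSeq, UUID, DocId, Revs}` entries. A worked, pure-data version of that fold (the UUID is a stand-in literal; the real code calls `couch_uuids:random()`):

```
%% Old on-disk data: purge_seq = 3 plus this list behind a file pointer.
PurgeSeq = 3,
PurgedIdsRevs = [{<<"doc1">>, [{1, <<"r1">>}]}, {<<"doc2">>, [{1, <<"r2">>}]}],
{Infos, _} = lists:foldl(
    fun({Id, Revs}, {Acc, PSeq}) ->
        {[{PSeq, <<"fake-uuid">>, Id, Revs} | Acc], PSeq + 1}
    end,
    {[], PurgeSeq},
    PurgedIdsRevs
).
%% Infos = [{4, <<"fake-uuid">>, <<"doc2">>, [{1, <<"r2">>}]},
%%          {3, <<"fake-uuid">>, <<"doc1">>, [{1, <<"r1">>}]}]
```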
- -record(comp_st, { db_name, old_st, @@ -44,14 +41,12 @@ locs }). - -ifdef(TEST). -define(COMP_EVENT(Name), couch_bt_engine_compactor_ev:event(Name)). -else. -define(COMP_EVENT(Name), ignore). -endif. - start(#st{} = St, DbName, Options, Parent) -> erlang:put(io_priority, {db_compact, DbName}), couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]), @@ -72,9 +67,13 @@ start(#st{} = St, DbName, Options, Parent) -> fun compact_final_sync/1 ], - FinalCompSt = lists:foldl(fun(Stage, CompSt) -> - Stage(CompSt) - end, InitCompSt, Stages), + FinalCompSt = lists:foldl( + fun(Stage, CompSt) -> + Stage(CompSt) + end, + InitCompSt, + Stages + ), #comp_st{ new_st = FinalNewSt, @@ -88,7 +87,6 @@ start(#st{} = St, DbName, Options, Parent) -> Msg = {compact_done, couch_bt_engine, FinalNewSt#st.filepath}, gen_server:cast(Parent, Msg). - open_compaction_files(DbName, OldSt, Options) -> #st{ filepath = DbFilePath, @@ -99,57 +97,59 @@ open_compaction_files(DbName, OldSt, Options) -> {ok, DataFd, DataHdr} = open_compaction_file(DataFile), {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile), DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr), - CompSt = case {DataHdr, MetaHdr} of - {#comp_header{}=A, #comp_header{}=A} -> - % We're restarting a compaction that did not finish - % before trying to swap out with the original db - DbHeader = A#comp_header.db_header, - St0 = couch_bt_engine:init_state( - DataFile, DataFd, DbHeader, Options), - St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_st), - #comp_st{ - db_name = DbName, - old_st = OldSt, - new_st = St1, - meta_fd = MetaFd, - retry = St0#st.id_tree - }; - _ when DataHdrIsDbHdr -> - % We tried to swap out the compaction but there were - % writes to the database during compaction. Start - % a compaction retry. - Header = couch_bt_engine_header:from(SrcHdr), - ok = reset_compaction_file(MetaFd, Header), - St0 = couch_bt_engine:init_state( - DataFile, DataFd, DataHdr, Options), - St1 = bind_emsort(St0, MetaFd, nil), - #comp_st{ - db_name = DbName, - old_st = OldSt, - new_st = St1, - meta_fd = MetaFd, - retry = St0#st.id_tree - }; - _ -> - % We're starting a compaction from scratch - Header = couch_bt_engine_header:from(SrcHdr), - ok = reset_compaction_file(DataFd, Header), - ok = reset_compaction_file(MetaFd, Header), - St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options), - St1 = bind_emsort(St0, MetaFd, nil), - #comp_st{ - db_name = DbName, - old_st = OldSt, - new_st = St1, - meta_fd = MetaFd, - retry = nil - } - end, + CompSt = + case {DataHdr, MetaHdr} of + {#comp_header{} = A, #comp_header{} = A} -> + % We're restarting a compaction that did not finish + % before trying to swap out with the original db + DbHeader = A#comp_header.db_header, + St0 = couch_bt_engine:init_state( + DataFile, DataFd, DbHeader, Options + ), + St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_st), + #comp_st{ + db_name = DbName, + old_st = OldSt, + new_st = St1, + meta_fd = MetaFd, + retry = St0#st.id_tree + }; + _ when DataHdrIsDbHdr -> + % We tried to swap out the compaction but there were + % writes to the database during compaction. Start + % a compaction retry. 
+ Header = couch_bt_engine_header:from(SrcHdr), + ok = reset_compaction_file(MetaFd, Header), + St0 = couch_bt_engine:init_state( + DataFile, DataFd, DataHdr, Options + ), + St1 = bind_emsort(St0, MetaFd, nil), + #comp_st{ + db_name = DbName, + old_st = OldSt, + new_st = St1, + meta_fd = MetaFd, + retry = St0#st.id_tree + }; + _ -> + % We're starting a compaction from scratch + Header = couch_bt_engine_header:from(SrcHdr), + ok = reset_compaction_file(DataFd, Header), + ok = reset_compaction_file(MetaFd, Header), + St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options), + St1 = bind_emsort(St0, MetaFd, nil), + #comp_st{ + db_name = DbName, + old_st = OldSt, + new_st = St1, + meta_fd = MetaFd, + retry = nil + } + end, unlink(DataFd), erlang:monitor(process, MetaFd), {ok, CompSt}. - copy_purge_info(#comp_st{} = CompSt) -> #comp_st{ db_name = DbName, @@ -164,25 +164,30 @@ copy_purge_info(#comp_st{} = CompSt) -> OldPSTree = OldSt#st.purge_seq_tree, StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1, BufferSize = config:get_integer( - "database_compaction", "doc_buffer_size", 524288), + "database_compaction", "doc_buffer_size", 524288 + ), CheckpointAfter = config:get( - "database_compaction", "checkpoint_after", BufferSize * 10), + "database_compaction", "checkpoint_after", BufferSize * 10 + ), EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) -> NewInfosSize = InfosSize + ?term_size(Info), - if NewInfosSize >= BufferSize -> - StAcc1 = copy_purge_infos( - OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry), - NewCopiedSize = CopiedSize + NewInfosSize, - if NewCopiedSize >= CheckpointAfter -> - StAcc2 = commit_compaction_data(StAcc1), - {ok, {StAcc2, [], 0, 0}}; + if + NewInfosSize >= BufferSize -> + StAcc1 = copy_purge_infos( + OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry + ), + NewCopiedSize = CopiedSize + NewInfosSize, + if + NewCopiedSize >= CheckpointAfter -> + StAcc2 = commit_compaction_data(StAcc1), + {ok, {StAcc2, [], 0, 0}}; + true -> + {ok, {StAcc1, [], 0, NewCopiedSize}} + end; true -> - {ok, {StAcc1, [], 0, NewCopiedSize}} - end; - true -> - NewInfosAcc = [Info | InfosAcc], - {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}} + NewInfosAcc = [Info | InfosAcc], + {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}} end end, @@ -197,7 +202,6 @@ copy_purge_info(#comp_st{} = CompSt) -> new_st = FinalNewSt }. - copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) -> #st{ id_tree = OldIdTree @@ -217,9 +221,12 @@ copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) -> } = NewSt1, % Copy over the purge infos - InfosToAdd = lists:filter(fun({PSeq, _, _, _}) -> - PSeq > MinPurgeSeq - end, Infos), + InfosToAdd = lists:filter( + fun({PSeq, _, _, _}) -> + PSeq > MinPurgeSeq + end, + Infos + ), {ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd), {ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd), @@ -232,35 +239,44 @@ copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) -> % any of the referenced docs have been completely purged % from the database. Any doc that has been completely purged % must then be removed from our partially compacted database. 
- NewSt3 = if Retry == nil -> NewSt2; true -> - AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos], - UniqDocIds = lists:usort(AllDocIds), - OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds), - OldZipped = lists:zip(UniqDocIds, OldIdResults), - - % The list of non-existant docs in the database being compacted - MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped], - - % Removing anything that exists in the partially compacted database - NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds), - ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}], - - {RemIds, RemSeqs} = lists:unzip(lists:map(fun(FDI) -> - #full_doc_info{ - id = Id, - update_seq = Seq - } = FDI, - {Id, Seq} - end, ToRemove)), - - {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds), - {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs), - - NewSt2#st{ - id_tree = NewIdTree1, - seq_tree = NewSeqTree1 - } - end, + NewSt3 = + if + Retry == nil -> + NewSt2; + true -> + AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos], + UniqDocIds = lists:usort(AllDocIds), + OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds), + OldZipped = lists:zip(UniqDocIds, OldIdResults), + + % The list of non-existant docs in the database being compacted + MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped], + + % Removing anything that exists in the partially compacted database + NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds), + ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}], + + {RemIds, RemSeqs} = lists:unzip( + lists:map( + fun(FDI) -> + #full_doc_info{ + id = Id, + update_seq = Seq + } = FDI, + {Id, Seq} + end, + ToRemove + ) + ), + + {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds), + {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs), + + NewSt2#st{ + id_tree = NewIdTree1, + seq_tree = NewSeqTree1 + } + end, Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header), NewSt4 = NewSt3#st{ @@ -268,7 +284,6 @@ copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) -> }, bind_emsort(NewSt4, MetaFd, MetaState). 
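The `Retry` branch reindented above guards a subtle invariant: when compaction is re-run, a document may have been completely purged from the source database after the first pass already copied it, so it must also be deleted from the partially built compact file. A pure-data sketch of that two-step filter, with maps standing in for the id btrees:

```
Purged = [<<"a">>, <<"b">>],          %% doc ids named in the purge infos
OldIdTree = #{<<"b">> => alive},      %% <<"a">> is now fully purged...
NewIdTree = #{<<"a">> => copied},     %% ...but pass one already copied it
MaybeRem = [Id || Id <- Purged, not maps:is_key(Id, OldIdTree)],
ToRemove = [Id || Id <- MaybeRem, maps:is_key(Id, NewIdTree)].
%% ToRemove = [<<"a">>], which is then dropped from the compact
%% file's id and seq trees via couch_btree:add_remove/3.
```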
- copy_compact(#comp_st{} = CompSt) -> #comp_st{ db_name = DbName, @@ -282,39 +297,49 @@ copy_compact(#comp_st{} = CompSt) -> NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0), TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq), BufferSize = list_to_integer( - config:get("database_compaction", "doc_buffer_size", "524288")), + config:get("database_compaction", "doc_buffer_size", "524288") + ), CheckpointAfter = couch_util:to_integer( - config:get("database_compaction", "checkpoint_after", - BufferSize * 10)), + config:get( + "database_compaction", + "checkpoint_after", + BufferSize * 10 + ) + ), EnumBySeqFun = - fun(DocInfo, _Offset, - {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) -> + fun( + DocInfo, + _Offset, + {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize} + ) -> + Seq = + case DocInfo of + #full_doc_info{} -> DocInfo#full_doc_info.update_seq; + #doc_info{} -> DocInfo#doc_info.high_seq + end, - Seq = case DocInfo of - #full_doc_info{} -> DocInfo#full_doc_info.update_seq; - #doc_info{} -> DocInfo#doc_info.high_seq + AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo), + if + AccUncopiedSize2 >= BufferSize -> + NewSt2 = copy_docs( + St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry + ), + AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2, + if + AccCopiedSize2 >= CheckpointAfter -> + {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq), + CommNewSt3 = commit_compaction_data(NewSt3), + {ok, {CommNewSt3, [], 0, 0}}; + true -> + {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq), + {ok, {NewSt3, [], 0, AccCopiedSize2}} + end; + true -> + {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2, AccCopiedSize}} + end end, - AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo), - if AccUncopiedSize2 >= BufferSize -> - NewSt2 = copy_docs( - St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry), - AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2, - if AccCopiedSize2 >= CheckpointAfter -> - {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq), - CommNewSt3 = commit_compaction_data(NewSt3), - {ok, {CommNewSt3, [], 0, 0}}; - true -> - {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq), - {ok, {NewSt3, [], 0, AccCopiedSize2}} - end; - true -> - {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2, - AccCopiedSize}} - end - end, - TaskProps0 = [ {type, database_compaction}, {database, DbName}, @@ -324,24 +349,27 @@ copy_compact(#comp_st{} = CompSt) -> {total_changes, TotalChanges} ], case (Retry =/= nil) and couch_task_status:is_task_added() of - true -> - couch_task_status:update([ - {retry, true}, - {phase, document_copy}, - {progress, 0}, - {changes_done, 0}, - {total_changes, TotalChanges} - ]); - false -> - couch_task_status:add_task(TaskProps0), - couch_task_status:set_update_frequency(500) + true -> + couch_task_status:update([ + {retry, true}, + {phase, document_copy}, + {progress, 0}, + {changes_done, 0}, + {total_changes, TotalChanges} + ]); + false -> + couch_task_status:add_task(TaskProps0), + couch_task_status:set_update_frequency(500) end, ?COMP_EVENT(seq_init), {ok, _, {NewSt2, Uncopied, _, _}} = - couch_btree:foldl(St#st.seq_tree, EnumBySeqFun, + couch_btree:foldl( + St#st.seq_tree, + EnumBySeqFun, {NewSt, [], 0, 0}, - [{start_key, NewUpdateSeq + 1}]), + [{start_key, NewUpdateSeq + 1}] + ), NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry), @@ -362,146 +390,168 @@ copy_compact(#comp_st{} = CompSt) -> new_st = NewSt6 }. 
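Both `copy_purge_info/1` and `copy_compact/1` pace themselves with the same two knobs read from the `"database_compaction"` config section: items are batched until their accumulated `?term_size` reaches `doc_buffer_size`, and a recoverable header checkpoint is committed once `checkpoint_after` bytes have been copied. Worked out with the defaults shown in the hunks above:

```
%% Defaults: flush a batch every 512 KiB of term data, write a restartable
%% checkpoint roughly every ten batches' worth of copied data.
BufferSize = 524288,                  %% 512 * 1024
CheckpointAfter = BufferSize * 10.    %% 5242880
```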
- copy_docs(St, #st{} = NewSt, MixedInfos, Retry) -> - DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos], + DocInfoIds = [Id || #doc_info{id = Id} <- MixedInfos], LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds), % COUCHDB-968, make sure we prune duplicates during compaction - NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) -> - A =< B - end, merge_lookups(MixedInfos, LookupResults)), - - NewInfos1 = lists:map(fun(Info) -> - {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun - ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) -> - {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt), - #size_info{external = OldExternalSize} = Leaf#leaf.sizes, - ExternalSize = case OldExternalSize of - 0 when is_binary(Body) -> - couch_compress:uncompressed_size(Body); - 0 -> - couch_ejson_size:encoded_size(Body); - N -> N + NewInfos0 = lists:usort( + fun(#full_doc_info{id = A}, #full_doc_info{id = B}) -> + A =< B + end, + merge_lookups(MixedInfos, LookupResults) + ), + + NewInfos1 = lists:map( + fun(Info) -> + {NewRevTree, FinalAcc} = couch_key_tree:mapfold( + fun + ({RevPos, RevId}, #leaf{ptr = Sp} = Leaf, leaf, SizesAcc) -> + {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt), + #size_info{external = OldExternalSize} = Leaf#leaf.sizes, + ExternalSize = + case OldExternalSize of + 0 when is_binary(Body) -> + couch_compress:uncompressed_size(Body); + 0 -> + couch_ejson_size:encoded_size(Body); + N -> + N + end, + Doc0 = #doc{ + id = Info#full_doc_info.id, + revs = {RevPos, [RevId]}, + deleted = Leaf#leaf.deleted, + body = Body, + atts = AttInfos + }, + Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0), + {ok, Doc2, ActiveSize} = + couch_bt_engine:write_doc_body(NewSt, Doc1), + AttSizes = [{element(3, A), element(4, A)} || A <- AttInfos], + NewLeaf = Leaf#leaf{ + ptr = Doc2#doc.body, + sizes = #size_info{ + active = ActiveSize, + external = ExternalSize + }, + atts = AttSizes + }, + {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)}; + (_Rev, _Leaf, branch, SizesAcc) -> + {?REV_MISSING, SizesAcc} end, - Doc0 = #doc{ - id = Info#full_doc_info.id, - revs = {RevPos, [RevId]}, - deleted = Leaf#leaf.deleted, - body = Body, - atts = AttInfos - }, - Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0), - {ok, Doc2, ActiveSize} = - couch_bt_engine:write_doc_body(NewSt, Doc1), - AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos], - NewLeaf = Leaf#leaf{ - ptr = Doc2#doc.body, - sizes = #size_info{ - active = ActiveSize, - external = ExternalSize - }, - atts = AttSizes - }, - {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)}; - (_Rev, _Leaf, branch, SizesAcc) -> - {?REV_MISSING, SizesAcc} - end, {0, 0, []}, Info#full_doc_info.rev_tree), - {FinalAS, FinalES, FinalAtts} = FinalAcc, - TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts), - NewActiveSize = FinalAS + TotalAttSize, - NewExternalSize = FinalES + TotalAttSize, - ?COMP_EVENT(seq_copy), - Info#full_doc_info{ - rev_tree = NewRevTree, - sizes = #size_info{ - active = NewActiveSize, - external = NewExternalSize + {0, 0, []}, + Info#full_doc_info.rev_tree + ), + {FinalAS, FinalES, FinalAtts} = FinalAcc, + TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts), + NewActiveSize = FinalAS + TotalAttSize, + NewExternalSize = FinalES + TotalAttSize, + ?COMP_EVENT(seq_copy), + Info#full_doc_info{ + rev_tree = NewRevTree, + sizes = #size_info{ + active = NewActiveSize, + external = NewExternalSize + } } - } - end, NewInfos0), + end, + NewInfos0 + ), Limit = 
couch_bt_engine:get_revs_limit(St), - NewInfos = lists:map(fun(FDI) -> - FDI#full_doc_info{ - rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit) - } - end, NewInfos1), + NewInfos = lists:map( + fun(FDI) -> + FDI#full_doc_info{ + rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit) + } + end, + NewInfos1 + ), RemoveSeqs = - case Retry of - nil -> - []; - OldDocIdTree -> - % Compaction is being rerun to catch up to writes during the - % first pass. This means we may have docs that already exist - % in the seq_tree in the .data file. Here we lookup any old - % update_seqs so that they can be removed. - Ids = [Id || #full_doc_info{id=Id} <- NewInfos], - Existing = couch_btree:lookup(OldDocIdTree, Ids), - [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing] - end, + case Retry of + nil -> + []; + OldDocIdTree -> + % Compaction is being rerun to catch up to writes during the + % first pass. This means we may have docs that already exist + % in the seq_tree in the .data file. Here we lookup any old + % update_seqs so that they can be removed. + Ids = [Id || #full_doc_info{id = Id} <- NewInfos], + Existing = couch_btree:lookup(OldDocIdTree, Ids), + [Seq || {ok, #full_doc_info{update_seq = Seq}} <- Existing] + end, {ok, SeqTree} = couch_btree:add_remove( - NewSt#st.seq_tree, NewInfos, RemoveSeqs), + NewSt#st.seq_tree, NewInfos, RemoveSeqs + ), EMSortFd = couch_emsort:get_fd(NewSt#st.id_tree), {ok, LocSizes} = couch_file:append_terms(EMSortFd, NewInfos), - EMSortEntries = lists:zipwith(fun(FDI, {Loc, _}) -> - #full_doc_info{ - id = Id, - update_seq = Seq - } = FDI, - {{Id, Seq}, Loc} - end, NewInfos, LocSizes), + EMSortEntries = lists:zipwith( + fun(FDI, {Loc, _}) -> + #full_doc_info{ + id = Id, + update_seq = Seq + } = FDI, + {{Id, Seq}, Loc} + end, + NewInfos, + LocSizes + ), {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, EMSortEntries), update_compact_task(length(NewInfos)), - NewSt#st{id_tree=IdEms, seq_tree=SeqTree}. - + NewSt#st{id_tree = IdEms, seq_tree = SeqTree}. 
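The `RemoveSeqs` computation above handles the other retry hazard: a document copied in pass one may have been updated since, so its first-pass `update_seq` must be dropped from the new seq tree or the doc would appear twice in the changes feed. A pure-data sketch, with a map standing in for the first pass's id tree:

```
NewIds = [<<"a">>, <<"b">>],        %% ids being copied in this pass
FirstPassSeqs = #{<<"a">> => 5},    %% <<"b">> was not copied before
RemoveSeqs = [Seq || Id <- NewIds,
                     Seq <- [maps:get(Id, FirstPassSeqs, not_found)],
                     Seq =/= not_found].
%% RemoveSeqs = [5]; couch_btree:add_remove/3 then deletes seq 5 while
%% inserting the doc at its new update seq.
```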
copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) -> {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp), - BinInfos = case BinInfos0 of - _ when is_binary(BinInfos0) -> - couch_compress:decompress(BinInfos0); - _ when is_list(BinInfos0) -> - % pre 1.2 file format - BinInfos0 - end, + BinInfos = + case BinInfos0 of + _ when is_binary(BinInfos0) -> + couch_compress:decompress(BinInfos0); + _ when is_list(BinInfos0) -> + % pre 1.2 file format + BinInfos0 + end, % copy the bin values NewBinInfos = lists:map( - fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) -> - % 010 UPGRADE CODE - {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp), - {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []), - ok = couch_stream:copy(SrcStream, DstStream), - {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} = - couch_stream:close(DstStream), - {ok, NewBinSp} = couch_stream:to_disk_term(NewStream), - couch_util:check_md5(ExpectedMd5, ActualMd5), - {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity}; - ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) -> - {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp), - {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []), - ok = couch_stream:copy(SrcStream, DstStream), - {NewStream, AttLen, _, ActualMd5, _IdentityMd5} = - couch_stream:close(DstStream), - {ok, NewBinSp} = couch_stream:to_disk_term(NewStream), - couch_util:check_md5(ExpectedMd5, ActualMd5), - Enc = case Enc1 of - true -> - % 0110 UPGRADE CODE - gzip; - false -> - % 0110 UPGRADE CODE - identity; - _ -> - Enc1 - end, - {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc} - end, BinInfos), + fun + ({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) -> + % 010 UPGRADE CODE + {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp), + {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []), + ok = couch_stream:copy(SrcStream, DstStream), + {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} = + couch_stream:close(DstStream), + {ok, NewBinSp} = couch_stream:to_disk_term(NewStream), + couch_util:check_md5(ExpectedMd5, ActualMd5), + {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity}; + ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) -> + {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp), + {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []), + ok = couch_stream:copy(SrcStream, DstStream), + {NewStream, AttLen, _, ActualMd5, _IdentityMd5} = + couch_stream:close(DstStream), + {ok, NewBinSp} = couch_stream:to_disk_term(NewStream), + couch_util:check_md5(ExpectedMd5, ActualMd5), + Enc = + case Enc1 of + true -> + % 0110 UPGRADE CODE + gzip; + false -> + % 0110 UPGRADE CODE + identity; + _ -> + Enc1 + end, + {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc} + end, + BinInfos + ), {BodyData, NewBinInfos}. - sort_meta_data(#comp_st{new_st = St0} = CompSt) -> ?COMP_EVENT(md_sort_init), NumKVs = couch_emsort:num_kvs(St0#st.id_tree), @@ -521,7 +571,6 @@ sort_meta_data(#comp_st{new_st = St0} = CompSt) -> } }. 
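Inside `copy_doc_attachments/3` above, the `Enc1` case is a small on-disk upgrade: very old attachment tuples (the "0110 UPGRADE CODE" comments) stored a boolean compressed flag where later formats store an encoding atom. Written as a standalone function, the mapping is just:

```
%% Sketch of the legacy encoding upgrade applied while copying attachments.
upgrade_enc(true) -> gzip;        %% old boolean flag: compressed
upgrade_enc(false) -> identity;   %% old boolean flag: plain
upgrade_enc(Enc) -> Enc.          %% already an encoding atom
```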
- copy_meta_data(#comp_st{new_st = St} = CompSt) -> #st{ fd = Fd, @@ -537,11 +586,11 @@ copy_meta_data(#comp_st{new_st = St} = CompSt) -> ]), {ok, Iter} = couch_emsort:iter(Src), Acc0 = #merge_st{ - src_fd=SrcFd, - id_tree=IdTree0, - seq_tree=St#st.seq_tree, - rem_seqs=[], - locs=[] + src_fd = SrcFd, + id_tree = IdTree0, + seq_tree = St#st.seq_tree, + rem_seqs = [], + locs = [] }, ?COMP_EVENT(md_copy_init), NumKVs = couch_emsort:num_kvs(Src), @@ -566,7 +615,6 @@ copy_meta_data(#comp_st{new_st = St} = CompSt) -> } }. - compact_final_sync(#comp_st{new_st = St0} = CompSt) -> ?COMP_EVENT(before_final_sync), {ok, St1} = couch_bt_engine:commit_data(St0), @@ -575,7 +623,6 @@ compact_final_sync(#comp_st{new_st = St0} = CompSt) -> new_st = St1 }. - open_compaction_file(FilePath) -> case couch_file:open(FilePath, [nologifmissing]) of {ok, Fd} -> @@ -588,12 +635,10 @@ open_compaction_file(FilePath) -> {ok, Fd, nil} end. - reset_compaction_file(Fd, Header) -> ok = couch_file:truncate(Fd, 0), ok = couch_file:write_header(Fd, Header). - commit_compaction_data(#comp_st{new_st = St} = CompSt) -> % Compaction needs to write headers to both the data file % and the meta file so if we need to restart we can pick @@ -601,12 +646,10 @@ commit_compaction_data(#comp_st{new_st = St} = CompSt) -> CompSt#comp_st{ new_st = commit_compaction_data(St) }; - commit_compaction_data(#st{} = St) -> commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)), commit_compaction_data(St, St#st.fd). - commit_compaction_data(#st{header = OldHeader} = St0, Fd) -> DataState = couch_bt_engine_header:id_tree_state(OldHeader), MetaFd = couch_emsort:get_fd(St0#st.id_tree), @@ -624,17 +667,15 @@ commit_compaction_data(#st{header = OldHeader} = St0, Fd) -> }, bind_emsort(St2, MetaFd, MetaState). - bind_emsort(St, Fd, nil) -> {ok, Ems} = couch_emsort:open(Fd), - St#st{id_tree=Ems}; + St#st{id_tree = Ems}; bind_emsort(St, Fd, {BB, _} = Root) when is_list(BB) -> % Upgrade clause when we find old compaction files bind_emsort(St, Fd, [{root, Root}]); bind_emsort(St, Fd, State) -> {ok, Ems} = couch_emsort:open(Fd, State), - St#st{id_tree=Ems}. - + St#st{id_tree = Ems}. bind_id_tree(St, Fd, State) -> {ok, IdBtree} = couch_btree:open(State, Fd, [ @@ -642,42 +683,41 @@ bind_id_tree(St, Fd, State) -> {join, fun couch_bt_engine:id_tree_join/2}, {reduce, fun couch_bt_engine:id_tree_reduce/2} ]), - St#st{id_tree=IdBtree}. - + St#st{id_tree = IdBtree}. merge_lookups(Infos, []) -> Infos; merge_lookups([], _) -> []; -merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) -> +merge_lookups([#doc_info{} = DI | RestInfos], [{ok, FDI} | RestLookups]) -> % Assert we've matched our lookups - if DI#doc_info.id == FDI#full_doc_info.id -> ok; true -> - erlang:error({mismatched_doc_infos, DI#doc_info.id}) + if + DI#doc_info.id == FDI#full_doc_info.id -> ok; + true -> erlang:error({mismatched_doc_infos, DI#doc_info.id}) end, [FDI | merge_lookups(RestInfos, RestLookups)]; merge_lookups([FDI | RestInfos], Lookups) -> [FDI | merge_lookups(RestInfos, Lookups)]. 
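`merge_lookups/2`, just above, stitches the seq-tree fold results back together: entries that arrived as old-style `#doc_info{}` records had their `#full_doc_info{}` looked up in the id tree separately, and the merge substitutes each with its lookup result (asserting the ids line up) while already-full records pass through. A pure-data mirror of the recursion, with `{di, Id}` and `{fdi, Id}` standing in for the two record types:

```
merge(Infos, []) -> Infos;
merge([], _) -> [];
merge([{di, Id} | RestI], [{ok, {fdi, Id}} | RestL]) ->
    %% matching the same Id in both heads doubles as the mismatch assertion
    [{fdi, Id} | merge(RestI, RestL)];
merge([{fdi, _} = FDI | RestI], Lookups) ->
    [FDI | merge(RestI, Lookups)].

%% merge([{fdi, a}, {di, b}], [{ok, {fdi, b}}]) => [{fdi, a}, {fdi, b}]
```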
- -merge_docids(Iter, #merge_st{locs=Locs}=Acc) when length(Locs) > 1000 -> +merge_docids(Iter, #merge_st{locs = Locs} = Acc) when length(Locs) > 1000 -> #merge_st{ - src_fd=SrcFd, - id_tree=IdTree0, - seq_tree=SeqTree0, - rem_seqs=RemSeqs + src_fd = SrcFd, + id_tree = IdTree0, + seq_tree = SeqTree0, + rem_seqs = RemSeqs } = Acc, {ok, Infos} = couch_file:pread_terms(SrcFd, Locs), {ok, IdTree1} = couch_btree:add(IdTree0, Infos), {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs), Acc1 = Acc#merge_st{ - id_tree=IdTree1, - seq_tree=SeqTree1, - rem_seqs=[], - locs=[] + id_tree = IdTree1, + seq_tree = SeqTree1, + rem_seqs = [], + locs = [] }, update_compact_task(length(Locs)), merge_docids(Iter, Acc1); -merge_docids(Iter, #merge_st{curr=Curr}=Acc) -> +merge_docids(Iter, #merge_st{curr = Curr} = Acc) -> case next_info(Iter, Curr, []) of {NextIter, NewCurr, Loc, Seqs} -> Acc1 = Acc#merge_st{ @@ -697,7 +737,6 @@ merge_docids(Iter, #merge_st{curr=Curr}=Acc) -> Acc end. - next_info(Iter, undefined, []) -> case couch_emsort:next(Iter) of {ok, {{Id, Seq}, Loc}, NextIter} -> @@ -715,15 +754,14 @@ next_info(Iter, {Id, Seq, Loc}, Seqs) -> {finished, Loc, Seqs} end. - update_compact_task(NumChanges) -> [Changes, Total] = couch_task_status:get([changes_done, total_changes]), Changes2 = Changes + NumChanges, - Progress = case Total of - 0 -> - 0; - _ -> - (Changes2 * 100) div Total - end, + Progress = + case Total of + 0 -> + 0; + _ -> + (Changes2 * 100) div Total + end, couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]). - diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl index 3f9f51821..e28f07723 100644 --- a/src/couch/src/couch_bt_engine_header.erl +++ b/src/couch/src/couch_bt_engine_header.erl @@ -12,7 +12,6 @@ -module(couch_bt_engine_header). - -export([ new/0, from/1, @@ -42,7 +41,6 @@ compacted_seq/1 ]). - % This should be updated anytime a header change happens that requires more % than filling in new defaults. % @@ -63,7 +61,8 @@ seq_tree_state = nil, local_tree_state = nil, purge_tree_state = nil, - purge_seq_tree_state = nil, %purge tree: purge_seq -> uuid + %purge tree: purge_seq -> uuid + purge_seq_tree_state = nil, security_ptr = nil, revs_limit = 1000, uuid, @@ -73,17 +72,14 @@ props_ptr }). - -define(PARTITION_DISK_VERSION, 8). - new() -> #db_header{ uuid = couch_uuids:random(), epochs = [{node(), 0}] }. - from(Header0) -> Header = upgrade(Header0), #db_header{ @@ -92,16 +88,15 @@ from(Header0) -> compacted_seq = Header#db_header.compacted_seq }. - is_header(Header) -> try upgrade(Header), true - catch _:_ -> - false + catch + _:_ -> + false end. - upgrade(Header) -> Funs = [ fun upgrade_tuple/1, @@ -110,93 +105,81 @@ upgrade(Header) -> fun upgrade_epochs/1, fun upgrade_compacted_seq/1 ], - lists:foldl(fun(F, HdrAcc) -> - F(HdrAcc) - end, Header, Funs). - + lists:foldl( + fun(F, HdrAcc) -> + F(HdrAcc) + end, + Header, + Funs + ). get(Header, Key) -> ?MODULE:get(Header, Key, undefined). - get(Header, Key, Default) -> get_field(Header, Key, Default). - set(Header, Key, Value) -> ?MODULE:set(Header, [{Key, Value}]). - set(Header0, Fields) -> % A subtlety here is that if a database was open during % the release upgrade that updates to uuids and epochs then % this dynamic upgrade also assigns a uuid and epoch. Header = upgrade(Header0), - lists:foldl(fun({Field, Value}, HdrAcc) -> - set_field(HdrAcc, Field, Value) - end, Header, Fields). 
- + lists:foldl( + fun({Field, Value}, HdrAcc) -> + set_field(HdrAcc, Field, Value) + end, + Header, + Fields + ). disk_version(Header) -> get_field(Header, disk_version). - latest_disk_version() -> - ?LATEST_DISK_VERSION. - + ?LATEST_DISK_VERSION. update_seq(Header) -> get_field(Header, update_seq). - id_tree_state(Header) -> get_field(Header, id_tree_state). - seq_tree_state(Header) -> get_field(Header, seq_tree_state). - local_tree_state(Header) -> get_field(Header, local_tree_state). - purge_tree_state(Header) -> get_field(Header, purge_tree_state). - purge_seq_tree_state(Header) -> get_field(Header, purge_seq_tree_state). - security_ptr(Header) -> get_field(Header, security_ptr). - revs_limit(Header) -> get_field(Header, revs_limit). - uuid(Header) -> get_field(Header, uuid). - epochs(Header) -> get_field(Header, epochs). - compacted_seq(Header) -> get_field(Header, compacted_seq). - purge_infos_limit(Header) -> get_field(Header, purge_infos_limit). - get_field(Header, Field) -> get_field(Header, Field, undefined). - get_field(Header, Field, Default) -> Idx = index(Field), case Idx > tuple_size(Header) of @@ -204,90 +187,103 @@ get_field(Header, Field, Default) -> false -> element(index(Field), Header) end. - set_field(Header, Field, Value) -> setelement(index(Field), Header, Value). - index(Field) -> couch_util:get_value(Field, indexes()). - indexes() -> Fields = record_info(fields, db_header), Indexes = lists:seq(2, record_info(size, db_header)), lists:zip(Fields, Indexes). - upgrade_tuple(Old) when is_record(Old, db_header) -> Old; upgrade_tuple(Old) when is_tuple(Old) -> NewSize = record_info(size, db_header), - if tuple_size(Old) < NewSize -> ok; true -> - erlang:error({invalid_header_size, Old}) + if + tuple_size(Old) < NewSize -> ok; + true -> erlang:error({invalid_header_size, Old}) end, - {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) -> - {Idx+1, setelement(Idx, Hdr, Val)} - end, {1, #db_header{}}, tuple_to_list(Old)), - if is_record(New, db_header) -> ok; true -> - erlang:error({invalid_header_extension, {Old, New}}) + {_, New} = lists:foldl( + fun(Val, {Idx, Hdr}) -> + {Idx + 1, setelement(Idx, Hdr, Val)} + end, + {1, #db_header{}}, + tuple_to_list(Old) + ), + if + is_record(New, db_header) -> ok; + true -> erlang:error({invalid_header_extension, {Old, New}}) end, New. -define(OLD_DISK_VERSION_ERROR, - "Database files from versions smaller than 0.10.0 are no longer supported"). + "Database files from versions smaller than 0.10.0 are no longer supported" +). 
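The `get_field`/`set_field`/`index` trio above is a compact idiom worth calling out: `record_info(fields, db_header)` expands at compile time, so `indexes()` maps each field name to its slot in the underlying tuple (slots start at 2 because element 1 is the record name), and `get_field`'s `tuple_size` guard is what lets an older, shorter header tuple fall back to defaults for fields it predates. A self-contained sketch with a hypothetical record:

```
-module(rec_index_sketch).
-export([get/2, get/3]).

-record(hdr, {version = 1, update_seq = 0, uuid}).

indexes() ->
    Fields = record_info(fields, hdr),
    lists:zip(Fields, lists:seq(2, record_info(size, hdr))).

get(Rec, Field) ->
    get(Rec, Field, undefined).

get(Rec, Field, Default) ->
    Idx = proplists:get_value(Field, indexes()),
    case Idx > tuple_size(Rec) of
        true -> Default;            %% field added after Rec was written
        false -> element(Idx, Rec)
    end.

%% rec_index_sketch:get(#hdr{update_seq = 42}, update_seq) =:= 42
%% rec_index_sketch:get({hdr, 1, 7}, uuid) =:= undefined  (short old tuple)
```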
-upgrade_disk_version(#db_header{}=Header) -> +upgrade_disk_version(#db_header{} = Header) -> case element(2, Header) of - 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); - 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); - 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); - 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11) - 5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2 - 6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge - 7 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre partitioned dbs - ?LATEST_DISK_VERSION -> Header; + 1 -> + throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); + 2 -> + throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); + 3 -> + throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); + % [0.10 - 0.11) + 4 -> + Header#db_header{security_ptr = nil}; + % pre 1.2 + 5 -> + Header#db_header{disk_version = ?LATEST_DISK_VERSION}; + % pre clustered purge + 6 -> + Header#db_header{disk_version = ?LATEST_DISK_VERSION}; + % pre partitioned dbs + 7 -> + Header#db_header{disk_version = ?LATEST_DISK_VERSION}; + ?LATEST_DISK_VERSION -> + Header; _ -> Reason = "Incorrect disk header version", throw({database_disk_version_error, Reason}) end. - -upgrade_uuid(#db_header{}=Header) -> +upgrade_uuid(#db_header{} = Header) -> case Header#db_header.uuid of undefined -> % Upgrading this old db file to a newer % on disk format that includes a UUID. - Header#db_header{uuid=couch_uuids:random()}; + Header#db_header{uuid = couch_uuids:random()}; _ -> Header end. - -upgrade_epochs(#db_header{}=Header) -> - NewEpochs = case Header#db_header.epochs of - undefined -> - % This node is taking over ownership of shard with - % and old version of couch file. Before epochs there - % was always an implicit assumption that a file was - % owned since eternity by the node it was on. This - % just codifies that assumption. - [{node(), 0}]; - [{Node, _} | _] = Epochs0 when Node == node() -> - % Current node is the current owner of this db - Epochs0; - Epochs1 -> - % This node is taking over ownership of this db - % and marking the update sequence where it happened. - [{node(), Header#db_header.update_seq} | Epochs1] - end, +upgrade_epochs(#db_header{} = Header) -> + NewEpochs = + case Header#db_header.epochs of + undefined -> + % This node is taking over ownership of shard with + % and old version of couch file. Before epochs there + % was always an implicit assumption that a file was + % owned since eternity by the node it was on. This + % just codifies that assumption. + [{node(), 0}]; + [{Node, _} | _] = Epochs0 when Node == node() -> + % Current node is the current owner of this db + Epochs0; + Epochs1 -> + % This node is taking over ownership of this db + % and marking the update sequence where it happened. + [{node(), Header#db_header.update_seq} | Epochs1] + end, % Its possible for a node to open a db and claim % ownership but never make a write to the db. This % removes nodes that claimed ownership but never % changed the database. DedupedEpochs = remove_dup_epochs(NewEpochs), - Header#db_header{epochs=DedupedEpochs}. - + Header#db_header{epochs = DedupedEpochs}. % This is slightly relying on the udpate_seq's being sorted % in epochs due to how we only ever push things onto the @@ -296,12 +292,12 @@ upgrade_epochs(#db_header{}=Header) -> % want to remove dupes (by calling a sort on the input to this % function). 
So for now we don't sort but are relying on the % idea that epochs is always sorted. -remove_dup_epochs([_]=Epochs) -> +remove_dup_epochs([_] = Epochs) -> Epochs; remove_dup_epochs([{N1, S}, {_N2, S}]) -> % Seqs match, keep the most recent owner [{N1, S}]; -remove_dup_epochs([_, _]=Epochs) -> +remove_dup_epochs([_, _] = Epochs) -> % Seqs don't match. Epochs; remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) -> @@ -311,11 +307,10 @@ remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) -> % Seqs don't match, recurse to check others [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])]. - -upgrade_compacted_seq(#db_header{}=Header) -> +upgrade_compacted_seq(#db_header{} = Header) -> case Header#db_header.compacted_seq of undefined -> - Header#db_header{compacted_seq=0}; + Header#db_header{compacted_seq = 0}; _ -> Header end. @@ -332,20 +327,30 @@ latest(_Else) -> mk_header(Vsn) -> { - db_header, % record name - Vsn, % disk version - 100, % update_seq - 0, % unused - foo, % id_tree_state - bar, % seq_tree_state - bam, % local_tree_state - flam, % was purge_seq - now purge_tree_state - baz, % was purged_docs - now purge_seq_tree_state - bang, % security_ptr - 999 % revs_limit + % record name + db_header, + % disk version + Vsn, + % update_seq + 100, + % unused + 0, + % id_tree_state + foo, + % seq_tree_state + bar, + % local_tree_state + bam, + % was purge_seq - now purge_tree_state + flam, + % was purged_docs - now purge_seq_tree_state + baz, + % security_ptr + bang, + % revs_limit + 999 }. - -ifdef(run_broken_tests). upgrade_v3_test() -> @@ -388,7 +393,6 @@ upgrade_v5_to_v8_test() -> % Security ptr isn't changed for v5 headers ?assertEqual(bang, security_ptr(NewHeader)). - upgrade_uuid_test() -> Vsn5Header = mk_header(5), @@ -404,7 +408,6 @@ upgrade_uuid_test() -> ResetHeader = from(NewNewHeader), ?assertEqual(uuid(NewHeader), uuid(ResetHeader)). - upgrade_epochs_test() -> Vsn5Header = mk_header(5), @@ -437,15 +440,12 @@ upgrade_epochs_test() -> ResetHeader = from(NewNewHeader), ?assertEqual(OwnedEpochs, epochs(ResetHeader)). - get_uuid_from_old_header_test() -> Vsn5Header = mk_header(5), ?assertEqual(undefined, uuid(Vsn5Header)). - get_epochs_from_old_header_test() -> Vsn5Header = mk_header(5), ?assertEqual(undefined, epochs(Vsn5Header)). - -endif. diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl index 431894a50..253877e77 100644 --- a/src/couch/src/couch_bt_engine_stream.erl +++ b/src/couch/src/couch_bt_engine_stream.erl @@ -20,23 +20,18 @@ to_disk_term/1 ]). - foldl({_Fd, []}, _Fun, Acc) -> Acc; - foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) -> foldl({Fd, [Pos | Rest]}, Fun, Acc); - foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) -> % We're processing the first bit of data % after we did a seek for a range fold. foldl({Fd, Rest}, Fun, Fun(Bin, Acc)); - foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) -> {ok, Bin} = couch_file:pread_binary(Fd, Pos), foldl({Fd, Rest}, Fun, Fun(Bin, Acc)). - seek({Fd, [{Pos, Length} | Rest]}, Offset) -> case Length =< Offset of true -> @@ -44,7 +39,6 @@ seek({Fd, [{Pos, Length} | Rest]}, Offset) -> false -> seek({Fd, [Pos | Rest]}, Offset) end; - seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) -> {ok, Bin} = couch_file:pread_binary(Fd, Pos), case iolist_size(Bin) =< Offset of @@ -55,16 +49,12 @@ seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) -> {ok, {Fd, [Tail | Rest]}} end. 
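Back in the header module, `remove_dup_epochs/1` relies on epochs being a newest-first `[{Node, Seq}]` list: a node that claimed ownership but never wrote leaves an entry whose `Seq` equals its successor's, and only the most recent claimant survives. Condensed to its essential recursion (dropping the reformatted two-element special cases above), with a worked input:

```
dedup([{N1, S}, {_N2, S} | Rest]) -> dedup([{N1, S} | Rest]);
dedup([E | Rest]) -> [E | dedup(Rest)];
dedup([]) -> [].

%% node_b took over at seq 10 but never wrote before node_c took over:
%% dedup([{node_c, 10}, {node_b, 10}, {node_a, 0}])
%%   => [{node_c, 10}, {node_a, 0}]
```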
- write({Fd, Written}, Data) when is_pid(Fd) -> {ok, Pos, _} = couch_file:append_binary(Fd, Data), {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}. - finalize({Fd, Written}) -> {ok, {Fd, lists:reverse(Written)}}. - to_disk_term({_Fd, Written}) -> {ok, Written}. - diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl index 858ae2b90..b974a22ee 100644 --- a/src/couch/src/couch_btree.erl +++ b/src/couch/src/couch_btree.erl @@ -21,45 +21,45 @@ -define(FILL_RATIO, 0.5). -extract(#btree{extract_kv=undefined}, Value) -> +extract(#btree{extract_kv = undefined}, Value) -> Value; -extract(#btree{extract_kv=Extract}, Value) -> +extract(#btree{extract_kv = Extract}, Value) -> Extract(Value). -assemble(#btree{assemble_kv=undefined}, Key, Value) -> +assemble(#btree{assemble_kv = undefined}, Key, Value) -> {Key, Value}; -assemble(#btree{assemble_kv=Assemble}, Key, Value) -> +assemble(#btree{assemble_kv = Assemble}, Key, Value) -> Assemble(Key, Value). -less(#btree{less=undefined}, A, B) -> +less(#btree{less = undefined}, A, B) -> A < B; -less(#btree{less=Less}, A, B) -> +less(#btree{less = Less}, A, B) -> Less(A, B). % pass in 'nil' for State if a new Btree. open(State, Fd) -> - {ok, #btree{root=State, fd=Fd}}. + {ok, #btree{root = State, fd = Fd}}. set_options(Bt, []) -> Bt; -set_options(Bt, [{split, Extract}|Rest]) -> - set_options(Bt#btree{extract_kv=Extract}, Rest); -set_options(Bt, [{join, Assemble}|Rest]) -> - set_options(Bt#btree{assemble_kv=Assemble}, Rest); -set_options(Bt, [{less, Less}|Rest]) -> - set_options(Bt#btree{less=Less}, Rest); -set_options(Bt, [{reduce, Reduce}|Rest]) -> - set_options(Bt#btree{reduce=Reduce}, Rest); -set_options(Bt, [{compression, Comp}|Rest]) -> - set_options(Bt#btree{compression=Comp}, Rest). +set_options(Bt, [{split, Extract} | Rest]) -> + set_options(Bt#btree{extract_kv = Extract}, Rest); +set_options(Bt, [{join, Assemble} | Rest]) -> + set_options(Bt#btree{assemble_kv = Assemble}, Rest); +set_options(Bt, [{less, Less} | Rest]) -> + set_options(Bt#btree{less = Less}, Rest); +set_options(Bt, [{reduce, Reduce} | Rest]) -> + set_options(Bt#btree{reduce = Reduce}, Rest); +set_options(Bt, [{compression, Comp} | Rest]) -> + set_options(Bt#btree{compression = Comp}, Rest). open(State, Fd, Options) -> - {ok, set_options(#btree{root=State, fd=Fd}, Options)}. + {ok, set_options(#btree{root = State, fd = Fd}, Options)}. -get_state(#btree{root=Root}) -> +get_state(#btree{root = Root}) -> Root. -final_reduce(#btree{reduce=Reduce}, Val) -> +final_reduce(#btree{reduce = Reduce}, Val) -> final_reduce(Reduce, Val); final_reduce(Reduce, {[], []}) -> Reduce(reduce, []); @@ -71,30 +71,42 @@ final_reduce(Reduce, {KVs, Reductions}) -> Red = Reduce(reduce, KVs), final_reduce(Reduce, {[], [Red | Reductions]}). 
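`final_reduce/2` above is how a partial fold's state, a `{KVs, Reductions}` pair of raw leaf entries plus already-reduced subtree values, collapses into one value: leftover KVs get a single `reduce` pass, then everything is combined by `rereduce`. A worked run with a simple counting reducer:

```
Reduce = fun
    (reduce, KVs) -> length(KVs);
    (rereduce, Reds) -> lists:sum(Reds)
end,
Final = fun
    F({[], []}) -> Reduce(reduce, []);
    F({[], [Red]}) -> Red;
    F({[], Reds}) -> Reduce(rereduce, Reds);
    F({KVs, Reds}) -> F({[], [Reduce(reduce, KVs) | Reds]})
end,
%% one leftover KV plus two subtree counts of 1 each:
3 = Final({[{key, val}], [1, 1]}).
```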
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) -> +fold_reduce(#btree{root = Root} = Bt, Fun, Acc, Options) -> Dir = couch_util:get_value(dir, Options, fwd), StartKey = couch_util:get_value(start_key, Options), InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options), KeyGroupFun = get_group_fun(Bt, Options), try {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} = - reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [], - KeyGroupFun, Fun, Acc), - if GroupedKey2 == undefined -> - {ok, Acc2}; - true -> - case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of - {ok, Acc3} -> {ok, Acc3}; - {stop, Acc3} -> {ok, Acc3} - end + reduce_stream_node( + Bt, + Dir, + Root, + StartKey, + InEndRangeFun, + undefined, + [], + [], + KeyGroupFun, + Fun, + Acc + ), + if + GroupedKey2 == undefined -> + {ok, Acc2}; + true -> + case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of + {ok, Acc3} -> {ok, Acc3}; + {stop, Acc3} -> {ok, Acc3} + end end catch throw:{stop, AccDone} -> {ok, AccDone} end. -full_reduce(#btree{root=nil,reduce=Reduce}) -> +full_reduce(#btree{root = nil, reduce = Reduce}) -> {ok, Reduce(reduce, [])}; -full_reduce(#btree{root=Root}) -> +full_reduce(#btree{root = Root}) -> {ok, element(2, Root)}. size(#btree{root = nil}) -> @@ -114,7 +126,7 @@ get_group_fun(Bt, Options) -> N when is_integer(N), N > 0 -> make_group_fun(Bt, N); undefined -> - couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end) + couch_util:get_value(key_group_fun, Options, fun(_, _) -> true end) end. make_group_fun(Bt, exact) -> @@ -135,7 +147,7 @@ make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 -> fun GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) -> GF({Key1, Val1}, {Key2, Val2}); - GF({[_|_] = Key1, _}, {[_|_] = Key2, _}) -> + GF({[_ | _] = Key1, _}, {[_ | _] = Key2, _}) -> SL1 = lists:sublist(Key1, GroupLevel), SL2 = lists:sublist(Key2, GroupLevel), case less(Bt, {SL1, nil}, {SL2, nil}) of @@ -175,61 +187,75 @@ convert_fun_arity(Fun) when is_function(Fun, 3) -> (traverse, _K, _Red, AccIn) -> {ok, AccIn} end; convert_fun_arity(Fun) when is_function(Fun, 4) -> - Fun. % Already arity 4 + % Already arity 4 + Fun. make_key_in_end_range_function(Bt, fwd, Options) -> case couch_util:get_value(end_key_gt, Options) of - undefined -> - case couch_util:get_value(end_key, Options) of undefined -> - fun(_Key) -> true end; - LastKey -> - fun(Key) -> not less(Bt, LastKey, Key) end - end; - EndKey -> - fun(Key) -> less(Bt, Key, EndKey) end + case couch_util:get_value(end_key, Options) of + undefined -> + fun(_Key) -> true end; + LastKey -> + fun(Key) -> not less(Bt, LastKey, Key) end + end; + EndKey -> + fun(Key) -> less(Bt, Key, EndKey) end end; make_key_in_end_range_function(Bt, rev, Options) -> case couch_util:get_value(end_key_gt, Options) of - undefined -> - case couch_util:get_value(end_key, Options) of undefined -> - fun(_Key) -> true end; - LastKey -> - fun(Key) -> not less(Bt, Key, LastKey) end - end; - EndKey -> - fun(Key) -> less(Bt, EndKey, Key) end + case couch_util:get_value(end_key, Options) of + undefined -> + fun(_Key) -> true end; + LastKey -> + fun(Key) -> not less(Bt, Key, LastKey) end + end; + EndKey -> + fun(Key) -> less(Bt, EndKey, Key) end end. - foldl(Bt, Fun, Acc) -> fold(Bt, Fun, Acc, []). foldl(Bt, Fun, Acc, Options) -> fold(Bt, Fun, Acc, Options). 
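The integer clause of `make_group_fun/2` above is the path taken by reduce queries with an integer group level: two array keys fall into the same group when their first `GroupLevel` elements compare equal (the `{p, Partition, Key}` clause first unwraps partitioned keys). The real code decides equality via two `less/3` calls; plain term equality is the shortcut in this worked check:

```
GroupLevel = 2,
Key1 = [2021, 11, 20],
Key2 = [2021, 11, 22],
%% same group: the first two elements match
true = lists:sublist(Key1, GroupLevel) =:= lists:sublist(Key2, GroupLevel).
```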
- -fold(#btree{root=nil}, _Fun, Acc, _Options) -> +fold(#btree{root = nil}, _Fun, Acc, _Options) -> {ok, {[], []}, Acc}; -fold(#btree{root=Root}=Bt, Fun, Acc, Options) -> +fold(#btree{root = Root} = Bt, Fun, Acc, Options) -> Dir = couch_util:get_value(dir, Options, fwd), InRange = make_key_in_end_range_function(Bt, Dir, Options), Result = - case couch_util:get_value(start_key, Options) of - undefined -> - stream_node(Bt, [], Bt#btree.root, InRange, Dir, - convert_fun_arity(Fun), Acc); - StartKey -> - stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir, - convert_fun_arity(Fun), Acc) - end, + case couch_util:get_value(start_key, Options) of + undefined -> + stream_node( + Bt, + [], + Bt#btree.root, + InRange, + Dir, + convert_fun_arity(Fun), + Acc + ); + StartKey -> + stream_node( + Bt, + [], + Bt#btree.root, + StartKey, + InRange, + Dir, + convert_fun_arity(Fun), + Acc + ) + end, case Result of - {ok, Acc2}-> - FullReduction = element(2, Root), - {ok, {[], [FullReduction]}, Acc2}; - {stop, LastReduction, Acc2} -> - {ok, LastReduction, Acc2} + {ok, Acc2} -> + FullReduction = element(2, Root), + {ok, {[], [FullReduction]}, Acc2}; + {stop, LastReduction, Acc2} -> + {ok, LastReduction, Acc2} end. add(Bt, InsertKeyValues) -> @@ -240,27 +266,28 @@ add_remove(Bt, InsertKeyValues, RemoveKeys) -> {ok, Bt2}. query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) -> - #btree{root=Root} = Bt, + #btree{root = Root} = Bt, InsertActions = lists:map( fun(KeyValue) -> {Key, Value} = extract(Bt, KeyValue), {insert, Key, Value} - end, InsertValues), + end, + InsertValues + ), RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys], FetchActions = [{fetch, Key, nil} || Key <- LookupKeys], SortFun = fun({OpA, A, _}, {OpB, B, _}) -> case A == B of - % A and B are equal, sort by op. - true -> op_order(OpA) < op_order(OpB); - false -> - less(Bt, A, B) + % A and B are equal, sort by op. + true -> op_order(OpA) < op_order(OpB); + false -> less(Bt, A, B) end end, Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])), {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []), {ok, NewRoot} = complete_root(Bt, KeyPointers), - {ok, QueryResults, Bt#btree{root=NewRoot}}. + {ok, QueryResults, Bt#btree{root = NewRoot}}. % for ordering different operations with the same key. % fetch < remove < insert @@ -268,11 +295,12 @@ op_order(fetch) -> 1; op_order(remove) -> 2; op_order(insert) -> 3. -lookup(#btree{root=Root, less=Less}=Bt, Keys) -> - SortedKeys = case Less of - undefined -> lists:sort(Keys); - _ -> lists:sort(Less, Keys) - end, +lookup(#btree{root = Root, less = Less} = Bt, Keys) -> + SortedKeys = + case Less of + undefined -> lists:sort(Keys); + _ -> lists:sort(Less, Keys) + end, {ok, SortedResults} = lookup(Bt, Root, SortedKeys), % We want to return the results in the same order as the keys were input % but we may have changed the order when we sorted. So we need to put the @@ -285,10 +313,10 @@ lookup(Bt, Node, Keys) -> Pointer = element(1, Node), {NodeType, NodeList} = get_node(Bt, Pointer), case NodeType of - kp_node -> - lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []); - kv_node -> - lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, []) + kp_node -> + lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []); + kv_node -> + lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, []) end. 
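`query_modify/4` above folds three action lists into one sorted plan, and the `SortFun` tiebreak via `op_order/1` (fetch < remove < insert) is what makes a same-key remove-plus-insert behave as a replace, with any fetch observing the pre-modification value. Reproduced standalone, with plain term order standing in for the tree's `less/3`:

```
OpOrder = fun(fetch) -> 1; (remove) -> 2; (insert) -> 3 end,
Actions = [{insert, key, v2}, {fetch, key, nil}, {remove, key, nil}],
lists:sort(
    fun({OpA, A, _}, {OpB, B, _}) ->
        case A == B of
            true -> OpOrder(OpA) < OpOrder(OpB);
            false -> A < B
        end
    end,
    Actions
).
%% => [{fetch, key, nil}, {remove, key, nil}, {insert, key, v2}]
```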
lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) -> @@ -300,14 +328,13 @@ lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Outp {Key, PointerInfo} = element(N, NodeTuple), SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end, case lists:splitwith(SplitFun, LookupKeys) of - {[], GreaterQueries} -> - lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output); - {LessEqQueries, GreaterQueries} -> - {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries), - lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output)) + {[], GreaterQueries} -> + lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output); + {LessEqQueries, GreaterQueries} -> + {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries), + lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output)) end. - lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) -> {ok, lists:reverse(Output)}; lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound -> @@ -317,24 +344,27 @@ lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) - N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey), {Key, Value} = element(N, NodeTuple), case less(Bt, LookupKey, Key) of - true -> - % LookupKey is less than Key - lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]); - false -> - case less(Bt, Key, LookupKey) of true -> - % LookupKey is greater than Key - lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]); + % LookupKey is less than Key + lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]); false -> - % LookupKey is equal to Key - lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output]) - end + case less(Bt, Key, LookupKey) of + true -> + % LookupKey is greater than Key + lookup_kvnode(Bt, NodeTuple, N + 1, RestLookupKeys, [ + {LookupKey, not_found} | Output + ]); + false -> + % LookupKey is equal to Key + lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [ + {LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output + ]) + end end. - complete_root(_Bt, []) -> {ok, nil}; -complete_root(_Bt, [{_Key, PointerInfo}])-> +complete_root(_Bt, [{_Key, PointerInfo}]) -> {ok, PointerInfo}; complete_root(Bt, KPs) -> {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs), @@ -348,12 +378,12 @@ complete_root(Bt, KPs) -> chunkify(InList) -> BaseChunkSize = get_chunk_size(), case ?term_size(InList) of - Size when Size > BaseChunkSize -> - NumberOfChunksLikely = ((Size div BaseChunkSize) + 1), - ChunkThreshold = Size div NumberOfChunksLikely, - chunkify(InList, ChunkThreshold, [], 0, []); - _Else -> - [InList] + Size when Size > BaseChunkSize -> + NumberOfChunksLikely = ((Size div BaseChunkSize) + 1), + ChunkThreshold = Size div NumberOfChunksLikely, + chunkify(InList, ChunkThreshold, [], 0, []); + _Else -> + [InList] end. 
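`chunkify/1`'s threshold arithmetic above is designed to avoid a runt final chunk: rather than filling chunks to the configured size and leaving a small remainder, it estimates the likely chunk count first and aims for equal sizes. Worked out for a 3000-byte KV list against the default `btree_chunk_size`:

```
Size = 3000,
BaseChunkSize = 1279,                                 %% default btree_chunk_size
NumberOfChunksLikely = (Size div BaseChunkSize) + 1,  %% 3
ChunkThreshold = Size div NumberOfChunksLikely.       %% 1000 bytes per chunk
%% three ~1000-byte chunks instead of 1279 + 1279 + 442
```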
chunkify([], _ChunkThreshold, [], 0, OutputChunks) -> @@ -365,58 +395,67 @@ chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) -> lists:reverse([lists:reverse(OutList) | OutputChunks]); chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) -> case ?term_size(InElement) of - Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] -> - chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]); - Size -> - chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks) + Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] -> + chunkify(RestInList, ChunkThreshold, [], 0, [ + lists:reverse([InElement | OutList]) | OutputChunks + ]); + Size -> + chunkify( + RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks + ) end. --compile({inline,[get_chunk_size/0]}). +-compile({inline, [get_chunk_size/0]}). get_chunk_size() -> try list_to_integer(config:get("couchdb", "btree_chunk_size", "1279")) - catch error:badarg -> - 1279 + catch + error:badarg -> + 1279 end. modify_node(Bt, RootPointerInfo, Actions, QueryOutput) -> - {NodeType, NodeList} = case RootPointerInfo of - nil -> - {kv_node, []}; - _Tuple -> - Pointer = element(1, RootPointerInfo), - get_node(Bt, Pointer) - end, + {NodeType, NodeList} = + case RootPointerInfo of + nil -> + {kv_node, []}; + _Tuple -> + Pointer = element(1, RootPointerInfo), + get_node(Bt, Pointer) + end, NodeTuple = list_to_tuple(NodeList), {ok, NewNodeList, QueryOutput2} = - case NodeType of - kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput); - kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput) - end, + case NodeType of + kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput); + kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput) + end, case NewNodeList of - [] -> % no nodes remain - {ok, [], QueryOutput2}; - NodeList -> % nothing changed - {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple), - {ok, [{LastKey, RootPointerInfo}], QueryOutput2}; - _Else2 -> - {ok, ResultList} = case RootPointerInfo of - nil -> - write_node(Bt, NodeType, NewNodeList); - _ -> + % no nodes remain + [] -> + {ok, [], QueryOutput2}; + % nothing changed + NodeList -> {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple), - OldNode = {LastKey, RootPointerInfo}, - write_node(Bt, OldNode, NodeType, NodeList, NewNodeList) - end, - {ok, ResultList, QueryOutput2} + {ok, [{LastKey, RootPointerInfo}], QueryOutput2}; + _Else2 -> + {ok, ResultList} = + case RootPointerInfo of + nil -> + write_node(Bt, NodeType, NewNodeList); + _ -> + {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple), + OldNode = {LastKey, RootPointerInfo}, + write_node(Bt, OldNode, NodeType, NodeList, NewNodeList) + end, + {ok, ResultList, QueryOutput2} end. -reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) -> +reduce_node(#btree{reduce = nil}, _NodeType, _NodeList) -> []; -reduce_node(#btree{reduce=R}, kp_node, NodeList) -> +reduce_node(#btree{reduce = R}, kp_node, NodeList) -> R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]); -reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) -> +reduce_node(#btree{reduce = R} = Bt, kv_node, NodeList) -> R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]). 
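The three-way case on NewNodeList in modify_node/4 above is easy to misread after the reformat, because the second clause matches against the already-bound NodeList variable rather than introducing a new one:
```
%% What each clause of "case NewNodeList of ... end" means:
%%   []       -> every KV in this subtree was removed; drop the node.
%%   NodeList -> NodeList is already bound to the node's old contents,
%%               so matching it means nothing changed; reuse the old
%%               {LastKey, RootPointerInfo} as-is.
%%   _Else2   -> real edits; write a fresh node (write_node/3), or diff
%%               against the old one (write_node/5) when one exists.
```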
reduce_tree_size(kv_node, NodeSize, _KvList) -> @@ -444,17 +483,14 @@ write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) -> {ok, PtrSizes} = couch_file:append_terms(Fd, ToWrite, WriteOpts), {ok, group_kps(Bt, NodeType, Chunks, PtrSizes)}. - group_kps(_Bt, _NodeType, [], []) -> []; - group_kps(Bt, NodeType, [Chunk | RestChunks], [{Ptr, Size} | RestPtrSizes]) -> {LastKey, _} = lists:last(Chunk), SubTreeSize = reduce_tree_size(NodeType, Size, Chunk), KP = {LastKey, {Ptr, reduce_node(Bt, NodeType, Chunk), SubTreeSize}}, [KP | group_kps(Bt, NodeType, RestChunks, RestPtrSizes)]. - write_node(Bt, _OldNode, NodeType, [], NewList) -> write_node(Bt, NodeType, NewList); write_node(Bt, _OldNode, NodeType, [_], NewList) -> @@ -462,14 +498,16 @@ write_node(Bt, _OldNode, NodeType, [_], NewList) -> write_node(Bt, OldNode, NodeType, OldList, NewList) -> case can_reuse_old_node(OldList, NewList) of {true, Prefix, Suffix} -> - {ok, PrefixKVs} = case Prefix of - [] -> {ok, []}; - _ -> write_node(Bt, NodeType, Prefix) - end, - {ok, SuffixKVs} = case Suffix of - [] -> {ok, []}; - _ -> write_node(Bt, NodeType, Suffix) - end, + {ok, PrefixKVs} = + case Prefix of + [] -> {ok, []}; + _ -> write_node(Bt, NodeType, Prefix) + end, + {ok, SuffixKVs} = + case Suffix of + [] -> {ok, []}; + _ -> write_node(Bt, NodeType, Suffix) + end, Result = PrefixKVs ++ [OldNode] ++ SuffixKVs, {ok, Result}; false -> @@ -481,8 +519,9 @@ can_reuse_old_node(OldList, NewList) -> case old_list_is_prefix(OldList, RestNewList, 0) of {true, Size, Suffix} -> ReuseThreshold = get_chunk_size() * ?FILL_RATIO, - if Size < ReuseThreshold -> false; true -> - {true, Prefix, Suffix} + if + Size < ReuseThreshold -> false; + true -> {true, Prefix, Suffix} end; false -> false @@ -510,38 +549,67 @@ old_list_is_prefix(_OldList, _NewList, _Acc) -> modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) -> modify_node(Bt, nil, Actions, QueryOutput); modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) -> - {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, - tuple_size(NodeTuple), [])), QueryOutput}; -modify_kpnode(Bt, NodeTuple, LowerBound, - [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) -> + {ok, + lists:reverse( + ResultNode, + bounded_tuple_to_list( + NodeTuple, + LowerBound, + tuple_size(NodeTuple), + [] + ) + ), + QueryOutput}; +modify_kpnode( + Bt, + NodeTuple, + LowerBound, + [{_, FirstActionKey, _} | _] = Actions, + ResultNode, + QueryOutput +) -> Sz = tuple_size(NodeTuple), N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey), case N =:= Sz of - true -> - % perform remaining actions on last node - {_, PointerInfo} = element(Sz, NodeTuple), - {ok, ChildKPs, QueryOutput2} = - modify_node(Bt, PointerInfo, Actions, QueryOutput), - NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, - Sz - 1, ChildKPs)), - {ok, NodeList, QueryOutput2}; - false -> - {NodeKey, PointerInfo} = element(N, NodeTuple), - SplitFun = fun({_ActionType, ActionKey, _ActionValue}) -> + true -> + % perform remaining actions on last node + {_, PointerInfo} = element(Sz, NodeTuple), + {ok, ChildKPs, QueryOutput2} = + modify_node(Bt, PointerInfo, Actions, QueryOutput), + NodeList = lists:reverse( + ResultNode, + bounded_tuple_to_list( + NodeTuple, + LowerBound, + Sz - 1, + ChildKPs + ) + ), + {ok, NodeList, QueryOutput2}; + false -> + {NodeKey, PointerInfo} = element(N, NodeTuple), + SplitFun = fun({_ActionType, ActionKey, _ActionValue}) -> not less(Bt, 
NodeKey, ActionKey) end, - {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions), - {ok, ChildKPs, QueryOutput2} = + {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions), + {ok, ChildKPs, QueryOutput2} = modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput), - ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple, - LowerBound, N - 1, ResultNode)), - modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2) + ResultNode2 = lists:reverse( + ChildKPs, + bounded_tuple_to_revlist( + NodeTuple, + LowerBound, + N - 1, + ResultNode + ) + ), + modify_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, ResultNode2, QueryOutput2) end. bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End -> Tail; bounded_tuple_to_revlist(Tuple, Start, End, Tail) -> - bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]). + bounded_tuple_to_revlist(Tuple, Start + 1, End, [element(Start, Tuple) | Tail]). bounded_tuple_to_list(Tuple, Start, End, Tail) -> bounded_tuple_to_list2(Tuple, Start, End, [], Tail). @@ -557,191 +625,438 @@ find_first_gteq(Bt, Tuple, Start, End, Key) -> Mid = Start + ((End - Start) div 2), {TupleKey, _} = element(Mid, Tuple), case less(Bt, TupleKey, Key) of - true -> - find_first_gteq(Bt, Tuple, Mid+1, End, Key); - false -> - find_first_gteq(Bt, Tuple, Start, Mid, Key) + true -> + find_first_gteq(Bt, Tuple, Mid + 1, End, Key); + false -> + find_first_gteq(Bt, Tuple, Start, Mid, Key) end. modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) -> - {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput}; -modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) -> + {ok, + lists:reverse( + ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), []) + ), + QueryOutput}; +modify_kvnode( + Bt, + NodeTuple, + LowerBound, + [{ActionType, ActionKey, ActionValue} | RestActions], + ResultNode, + QueryOutput +) when LowerBound > tuple_size(NodeTuple) -> case ActionType of - insert -> - modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput); - remove -> - % just drop the action - modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput); - fetch -> - % the key/value must not exist in the tree - modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput]) + insert -> + modify_kvnode( + Bt, + NodeTuple, + LowerBound, + RestActions, + [{ActionKey, ActionValue} | ResultNode], + QueryOutput + ); + remove -> + % just drop the action + modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput); + fetch -> + % the key/value must not exist in the tree + modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [ + {not_found, {ActionKey, nil}} | QueryOutput + ]) end; -modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) -> +modify_kvnode( + Bt, + NodeTuple, + LowerBound, + [{ActionType, ActionKey, ActionValue} | RestActions], + AccNode, + QueryOutput +) -> N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey), {Key, Value} = element(N, NodeTuple), - ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode), + ResultNode = bounded_tuple_to_revlist(NodeTuple, 
LowerBound, N - 1, AccNode), case less(Bt, ActionKey, Key) of - true -> - case ActionType of - insert -> - % ActionKey is less than the Key, so insert - modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput); - remove -> - % ActionKey is less than the Key, just drop the action - modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput); - fetch -> - % ActionKey is less than the Key, the key/value must not exist in the tree - modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput]) - end; - false -> - % ActionKey and Key are maybe equal. - case less(Bt, Key, ActionKey) of - false -> + true -> case ActionType of - insert -> - modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput); - remove -> - modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput); - fetch -> - % ActionKey is equal to the Key, insert into the QueryOuput, but re-process the node - % since an identical action key can follow it. - modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput]) + insert -> + % ActionKey is less than the Key, so insert + modify_kvnode( + Bt, + NodeTuple, + N, + RestActions, + [{ActionKey, ActionValue} | ResultNode], + QueryOutput + ); + remove -> + % ActionKey is less than the Key, just drop the action + modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput); + fetch -> + % ActionKey is less than the Key, the key/value must not exist in the tree + modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [ + {not_found, {ActionKey, nil}} | QueryOutput + ]) end; - true -> - modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput) - end + false -> + % ActionKey and Key are maybe equal. + case less(Bt, Key, ActionKey) of + false -> + case ActionType of + insert -> + modify_kvnode( + Bt, + NodeTuple, + N + 1, + RestActions, + [{ActionKey, ActionValue} | ResultNode], + QueryOutput + ); + remove -> + modify_kvnode( + Bt, NodeTuple, N + 1, RestActions, ResultNode, QueryOutput + ); + fetch -> + % ActionKey is equal to the Key, insert into the QueryOuput, but re-process the node + % since an identical action key can follow it. + modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [ + {ok, assemble(Bt, Key, Value)} | QueryOutput + ]) + end; + true -> + modify_kvnode( + Bt, + NodeTuple, + N + 1, + [{ActionType, ActionKey, ActionValue} | RestActions], + [{Key, Value} | ResultNode], + QueryOutput + ) + end end. 
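Both kv-node paths above lean on two small idioms: a binary search for the first entry not less than a key, and an equality test derived from two applications of the tree's single less/2 ordering. Standalone sketches of both, without the #btree{} plumbing (names are illustrative):
```
%% Index of the first element of Tuple >= Key (Erlang term order);
%% returns tuple_size(Tuple) when every element is smaller, like
%% find_first_gteq/5.
first_gteq(Tuple, Key) ->
    first_gteq(Tuple, 1, tuple_size(Tuple), Key).

first_gteq(_Tuple, Start, End, _Key) when Start >= End ->
    Start;
first_gteq(Tuple, Start, End, Key) ->
    Mid = Start + ((End - Start) div 2),
    case element(Mid, Tuple) < Key of
        true -> first_gteq(Tuple, Mid + 1, End, Key);
        false -> first_gteq(Tuple, Start, Mid, Key)
    end.

%% not Less(A, B) and not Less(B, A) together mean A equals B, which
%% is why modify_kvnode/6 nests two case-of-less expressions.
compare(Less, A, B) ->
    case Less(A, B) of
        true ->
            lt;
        false ->
            case Less(B, A) of
                true -> gt;
                false -> eq
            end
    end.
```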
- -reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc, - GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) -> +reduce_stream_node( + _Bt, + _Dir, + nil, + _KeyStart, + _InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + _KeyGroupFun, + _Fun, + Acc +) -> {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey}; -reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc, - GroupedRedsAcc, KeyGroupFun, Fun, Acc) -> +reduce_stream_node( + Bt, + Dir, + Node, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc +) -> P = element(1, Node), case get_node(Bt, P) of - {kp_node, NodeList} -> - NodeList2 = adjust_dir(Dir, NodeList), - reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey, - GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc); - {kv_node, KVs} -> - KVs2 = adjust_dir(Dir, KVs), - reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey, - GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) + {kp_node, NodeList} -> + NodeList2 = adjust_dir(Dir, NodeList), + reduce_stream_kp_node( + Bt, + Dir, + NodeList2, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc + ); + {kv_node, KVs} -> + KVs2 = adjust_dir(Dir, KVs), + reduce_stream_kv_node( + Bt, + Dir, + KVs2, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc + ) end. -reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun, - GroupedKey, GroupedKVsAcc, GroupedRedsAcc, - KeyGroupFun, Fun, Acc) -> - +reduce_stream_kv_node( + Bt, + Dir, + KVs, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc +) -> GTEKeyStartKVs = - case KeyStart of - undefined -> - KVs; - _ -> - DropFun = case Dir of - fwd -> - fun({Key, _}) -> less(Bt, Key, KeyStart) end; - rev -> - fun({Key, _}) -> less(Bt, KeyStart, Key) end + case KeyStart of + undefined -> + KVs; + _ -> + DropFun = + case Dir of + fwd -> + fun({Key, _}) -> less(Bt, Key, KeyStart) end; + rev -> + fun({Key, _}) -> less(Bt, KeyStart, Key) end + end, + lists:dropwhile(DropFun, KVs) end, - lists:dropwhile(DropFun, KVs) - end, KVs2 = lists:takewhile( - fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs), - reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc, - KeyGroupFun, Fun, Acc). - - -reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc, - _KeyGroupFun, _Fun, Acc) -> + fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs + ), + reduce_stream_kv_node2( + Bt, + KVs2, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc + ). 
+ +reduce_stream_kv_node2( + _Bt, + [], + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + _KeyGroupFun, + _Fun, + Acc +) -> {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey}; -reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc, - GroupedRedsAcc, KeyGroupFun, Fun, Acc) -> +reduce_stream_kv_node2( + Bt, + [{Key, Value} | RestKVs], + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc +) -> case GroupedKey of - undefined -> - reduce_stream_kv_node2(Bt, RestKVs, Key, - [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc); - _ -> - - case KeyGroupFun(GroupedKey, Key) of - true -> - reduce_stream_kv_node2(Bt, RestKVs, GroupedKey, - [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun, - Fun, Acc); - false -> - case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of - {ok, Acc2} -> - reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)], - [], KeyGroupFun, Fun, Acc2); - {stop, Acc2} -> - throw({stop, Acc2}) + undefined -> + reduce_stream_kv_node2( + Bt, + RestKVs, + Key, + [assemble(Bt, Key, Value)], + [], + KeyGroupFun, + Fun, + Acc + ); + _ -> + case KeyGroupFun(GroupedKey, Key) of + true -> + reduce_stream_kv_node2( + Bt, + RestKVs, + GroupedKey, + [assemble(Bt, Key, Value) | GroupedKVsAcc], + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc + ); + false -> + case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of + {ok, Acc2} -> + reduce_stream_kv_node2( + Bt, + RestKVs, + Key, + [assemble(Bt, Key, Value)], + [], + KeyGroupFun, + Fun, + Acc2 + ); + {stop, Acc2} -> + throw({stop, Acc2}) + end end - end end. -reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun, - GroupedKey, GroupedKVsAcc, GroupedRedsAcc, - KeyGroupFun, Fun, Acc) -> +reduce_stream_kp_node( + Bt, + Dir, + NodeList, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc +) -> Nodes = - case KeyStart of - undefined -> - NodeList; - _ -> - case Dir of - fwd -> - lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList); - rev -> - RevKPs = lists:reverse(NodeList), - case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of - {_Before, []} -> + case KeyStart of + undefined -> NodeList; - {Before, [FirstAfter | _]} -> - [FirstAfter | lists:reverse(Before)] - end - end - end, + _ -> + case Dir of + fwd -> + lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList); + rev -> + RevKPs = lists:reverse(NodeList), + case + lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) + of + {_Before, []} -> + NodeList; + {Before, [FirstAfter | _]} -> + [FirstAfter | lists:reverse(Before)] + end + end + end, {InRange, MaybeInRange} = lists:splitwith( - fun({Key, _}) -> InEndRangeFun(Key) end, Nodes), - NodesInRange = case MaybeInRange of - [FirstMaybeInRange | _] when Dir =:= fwd -> - InRange ++ [FirstMaybeInRange]; - _ -> - InRange - end, - reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun, - GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc). 
- - -reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun, - undefined, [], [], KeyGroupFun, Fun, Acc) -> + fun({Key, _}) -> InEndRangeFun(Key) end, Nodes + ), + NodesInRange = + case MaybeInRange of + [FirstMaybeInRange | _] when Dir =:= fwd -> + InRange ++ [FirstMaybeInRange]; + _ -> + InRange + end, + reduce_stream_kp_node2( + Bt, + Dir, + NodesInRange, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc + ). + +reduce_stream_kp_node2( + Bt, + Dir, + [{_Key, NodeInfo} | RestNodeList], + KeyStart, + InEndRangeFun, + undefined, + [], + [], + KeyGroupFun, + Fun, + Acc +) -> {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} = - reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined, - [], [], KeyGroupFun, Fun, Acc), - reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2, - GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2); -reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun, - GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) -> - {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) -> - KeyGroupFun(GroupedKey, Key) end, NodeList), + reduce_stream_node( + Bt, + Dir, + NodeInfo, + KeyStart, + InEndRangeFun, + undefined, + [], + [], + KeyGroupFun, + Fun, + Acc + ), + reduce_stream_kp_node2( + Bt, + Dir, + RestNodeList, + KeyStart, + InEndRangeFun, + GroupedKey2, + GroupedKVsAcc2, + GroupedRedsAcc2, + KeyGroupFun, + Fun, + Acc2 + ); +reduce_stream_kp_node2( + Bt, + Dir, + NodeList, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc +) -> + {Grouped0, Ungrouped0} = lists:splitwith( + fun({Key, _}) -> + KeyGroupFun(GroupedKey, Key) + end, + NodeList + ), {GroupedNodes, UngroupedNodes} = - case Grouped0 of - [] -> - {Grouped0, Ungrouped0}; - _ -> - [FirstGrouped | RestGrouped] = lists:reverse(Grouped0), - {RestGrouped, [FirstGrouped | Ungrouped0]} - end, + case Grouped0 of + [] -> + {Grouped0, Ungrouped0}; + _ -> + [FirstGrouped | RestGrouped] = lists:reverse(Grouped0), + {RestGrouped, [FirstGrouped | Ungrouped0]} + end, GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes], case UngroupedNodes of - [{_Key, NodeInfo}|RestNodes] -> - {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} = - reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey, - GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc), - reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2, - GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2); - [] -> - {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey} + [{_Key, NodeInfo} | RestNodes] -> + {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} = + reduce_stream_node( + Bt, + Dir, + NodeInfo, + KeyStart, + InEndRangeFun, + GroupedKey, + GroupedKVsAcc, + GroupedReds ++ GroupedRedsAcc, + KeyGroupFun, + Fun, + Acc + ), + reduce_stream_kp_node2( + Bt, + Dir, + RestNodes, + KeyStart, + InEndRangeFun, + GroupedKey2, + GroupedKVsAcc2, + GroupedRedsAcc2, + KeyGroupFun, + Fun, + Acc2 + ); + [] -> + {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey} end. 
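The reduce_stream_* clauses above are normally reached through couch_btree:fold_reduce/4, which streams per-group partial reductions to a caller fun. A shell sketch of a grouped count; fold_reduce, final_reduce and the key_group_fun option are the real couch_btree API, while the file name and data are illustrative:
```
CountRed = fun
    (reduce, KVs) -> length(KVs);
    (rereduce, Reds) -> lists:sum(Reds)
end,
{ok, Fd} = couch_file:open("/tmp/reduce_demo.couch", [create, overwrite]),
{ok, Bt0} = couch_btree:open(nil, Fd, [{reduce, CountRed}]),
{ok, Bt} = couch_btree:add(Bt0, [{{Day, N}, N} || Day <- [mon, tue], N <- lists:seq(1, 5)]),
SameDay = fun({DayA, _}, {DayB, _}) -> DayA =:= DayB end,
FoldFun = fun({Day, _}, PartialReds, Acc) ->
    {ok, [{Day, couch_btree:final_reduce(Bt, PartialReds)} | Acc]}
end,
{ok, Counts} = couch_btree:fold_reduce(Bt, FoldFun, [], [{key_group_fun, SameDay}]),
%% Counts =:= [{tue, 5}, {mon, 5}]
couch_file:close(Fd).
```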
adjust_dir(fwd, List) -> @@ -753,20 +1068,20 @@ stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) -> Pointer = element(1, Node), {NodeType, NodeList} = get_node(Bt, Pointer), case NodeType of - kp_node -> - stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc); - kv_node -> - stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc) + kp_node -> + stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc); + kv_node -> + stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc) end. stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) -> Pointer = element(1, Node), {NodeType, NodeList} = get_node(Bt, Pointer), case NodeType of - kp_node -> - stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc); - kv_node -> - stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc) + kp_node -> + stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc); + kv_node -> + stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc) end. stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) -> @@ -774,84 +1089,87 @@ stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) -> stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) -> Red = element(2, Node), case Fun(traverse, Key, Red, Acc) of - {ok, Acc2} -> - case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of - {ok, Acc3} -> - stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3); - {stop, LastReds, Acc3} -> - {stop, LastReds, Acc3} - end; - {skip, Acc2} -> - stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2); - {stop, Acc2} -> - {stop, Reds, Acc2} + {ok, Acc2} -> + case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of + {ok, Acc3} -> + stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3); + {stop, LastReds, Acc3} -> + {stop, LastReds, Acc3} + end; + {skip, Acc2} -> + stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2); + {stop, Acc2} -> + {stop, Reds, Acc2} end. drop_nodes(_Bt, Reds, _StartKey, []) -> {Reds, []}; drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) -> case less(Bt, NodeKey, StartKey) of - true -> - drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs); - false -> - {Reds, [{NodeKey, Node} | RestKPs]} + true -> + drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs); + false -> + {Reds, [{NodeKey, Node} | RestKPs]} end. 
stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) -> {NewReds, NodesToStream} = - case Dir of - fwd -> - % drop all nodes sorting before the key - drop_nodes(Bt, Reds, StartKey, KPs); - rev -> - % keep all nodes sorting before the key, AND the first node to sort after - RevKPs = lists:reverse(KPs), - case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of - {_RevsBefore, []} -> - % everything sorts before it - {Reds, KPs}; - {RevBefore, [FirstAfter | Drop]} -> - {[element(2, Node) || {_K, Node} <- Drop] ++ Reds, - [FirstAfter | lists:reverse(RevBefore)]} - end - end, + case Dir of + fwd -> + % drop all nodes sorting before the key + drop_nodes(Bt, Reds, StartKey, KPs); + rev -> + % keep all nodes sorting before the key, AND the first node to sort after + RevKPs = lists:reverse(KPs), + case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of + {_RevsBefore, []} -> + % everything sorts before it + {Reds, KPs}; + {RevBefore, [FirstAfter | Drop]} -> + {[element(2, Node) || {_K, Node} <- Drop] ++ Reds, [ + FirstAfter | lists:reverse(RevBefore) + ]} + end + end, case NodesToStream of - [] -> - {ok, Acc}; - [{_Key, Node} | Rest] -> - case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of - {ok, Acc2} -> - Red = element(2, Node), - stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2); - {stop, LastReds, Acc2} -> - {stop, LastReds, Acc2} - end + [] -> + {ok, Acc}; + [{_Key, Node} | Rest] -> + case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of + {ok, Acc2} -> + Red = element(2, Node), + stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2); + {stop, LastReds, Acc2} -> + {stop, LastReds, Acc2} + end end. stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) -> DropFun = - case Dir of - fwd -> - fun({Key, _}) -> less(Bt, Key, StartKey) end; - rev -> - fun({Key, _}) -> less(Bt, StartKey, Key) end - end, + case Dir of + fwd -> + fun({Key, _}) -> less(Bt, Key, StartKey) end; + rev -> + fun({Key, _}) -> less(Bt, StartKey, Key) end + end, {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs), - AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs], + AssembleLTKVs = [assemble(Bt, K, V) || {K, V} <- LTKVs], stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc). stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) -> {ok, Acc}; -stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) -> +stream_kv_node2(Bt, Reds, PrevKVs, [{K, V} | RestKVs], InRange, Dir, Fun, Acc) -> case InRange(K) of - false -> - {stop, {PrevKVs, Reds}, Acc}; - true -> - AssembledKV = assemble(Bt, K, V), - case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of - {ok, Acc2} -> - stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2); - {stop, Acc2} -> - {stop, {PrevKVs, Reds}, Acc2} - end + false -> + {stop, {PrevKVs, Reds}, Acc}; + true -> + AssembledKV = assemble(Bt, K, V), + case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of + {ok, Acc2} -> + stream_kv_node2( + Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2 + ); + {stop, Acc2} -> + {stop, {PrevKVs, Reds}, Acc2} + end end. 
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl index 2078fed3a..089cda975 100644 --- a/src/couch/src/couch_changes.erl +++ b/src/couch/src/couch_changes.erl @@ -70,62 +70,80 @@ handle_db_changes(Args0, Req, Db0) -> end, Start = fun() -> {ok, Db} = couch_db:reopen(Db0), - StartSeq = case Dir of - rev -> - couch_db:get_update_seq(Db); - fwd -> - Since - end, + StartSeq = + case Dir of + rev -> + couch_db:get_update_seq(Db); + fwd -> + Since + end, {Db, StartSeq} end, % begin timer to deal with heartbeat when filter function fails case Args#changes_args.heartbeat of - undefined -> - erlang:erase(last_changes_heartbeat); - Val when is_integer(Val); Val =:= true -> - put(last_changes_heartbeat, os:timestamp()) + undefined -> + erlang:erase(last_changes_heartbeat); + Val when is_integer(Val); Val =:= true -> + put(last_changes_heartbeat, os:timestamp()) end, case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of - true -> - fun(CallbackAcc) -> - {Callback, UserAcc} = get_callback_acc(CallbackAcc), - {ok, Listener} = StartListenerFun(), - - {Db, StartSeq} = Start(), - UserAcc2 = start_sending_changes(Callback, UserAcc, Feed), - {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback), - Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq, - <<"">>, Timeout, TimeoutFun), - try - keep_sending_changes( - Args#changes_args{dir=fwd}, - Acc0, - true) - after - couch_event:stop_listener(Listener), - get_rest_updated(ok) % clean out any remaining update messages + true -> + fun(CallbackAcc) -> + {Callback, UserAcc} = get_callback_acc(CallbackAcc), + {ok, Listener} = StartListenerFun(), + + {Db, StartSeq} = Start(), + UserAcc2 = start_sending_changes(Callback, UserAcc, Feed), + {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback), + Acc0 = build_acc( + Args, + Callback, + UserAcc2, + Db, + StartSeq, + <<"">>, + Timeout, + TimeoutFun + ), + try + keep_sending_changes( + Args#changes_args{dir = fwd}, + Acc0, + true + ) + after + couch_event:stop_listener(Listener), + % clean out any remaining update messages + get_rest_updated(ok) + end + end; + false -> + fun(CallbackAcc) -> + {Callback, UserAcc} = get_callback_acc(CallbackAcc), + UserAcc2 = start_sending_changes(Callback, UserAcc, Feed), + {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback), + {Db, StartSeq} = Start(), + Acc0 = build_acc( + Args#changes_args{feed = "normal"}, + Callback, + UserAcc2, + Db, + StartSeq, + <<>>, + Timeout, + TimeoutFun + ), + {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} = + send_changes( + Acc0, + Dir, + true + ), + end_sending_changes(Callback, UserAcc3, LastSeq, Feed) end - end; - false -> - fun(CallbackAcc) -> - {Callback, UserAcc} = get_callback_acc(CallbackAcc), - UserAcc2 = start_sending_changes(Callback, UserAcc, Feed), - {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback), - {Db, StartSeq} = Start(), - Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback, - UserAcc2, Db, StartSeq, <<>>, - Timeout, TimeoutFun), - {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} = - send_changes( - Acc0, - Dir, - true), - end_sending_changes(Callback, UserAcc3, LastSeq, Feed) - end end. - handle_db_event(_DbName, updated, Parent) -> Parent ! updated, {ok, Parent}; @@ -135,7 +153,6 @@ handle_db_event(_DbName, deleted, Parent) -> handle_db_event(_DbName, _Event, Parent) -> {ok, Parent}. 
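Both branches of handle_db_changes/3 above return a fun; the caller hands it the callback plus its accumulator. A rough shell sketch of the one-shot "normal" path, assuming a running couch application, an existing db, and the couch_db.hrl records loaded (e.g. via rr/1 in the shell); treat the exact event tuples — start, {change, Row, Prepend}, {stop, EndSeq} — as a sketch of what this module emits:
```
{ok, Db} = couch_db:open_int(<<"mydb">>, []),
Args = #changes_args{feed = "normal", since = 0},
Callback = fun
    (start, _Feed, Acc) -> Acc;
    ({change, Row, _Prepend}, _Feed, Acc) -> [Row | Acc];
    ({stop, _EndSeq}, _Feed, Acc) -> lists:reverse(Acc);
    (_Other, _Feed, Acc) -> Acc                % catch-all; shapes may vary
end,
ChangesFun = couch_changes:handle_db_changes(Args, {json_req, null}, Db),
Rows = ChangesFun({Callback, []}).
```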
- handle_view_event(_DbName, Msg, {Parent, DDocId}) -> case Msg of {index_commit, DDocId} -> @@ -152,17 +169,17 @@ get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) -> get_callback_acc(Callback) when is_function(Callback, 2) -> {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}. - configure_filter("_doc_ids", Style, Req, _Db) -> {doc_ids, Style, get_doc_ids(Req)}; configure_filter("_selector", Style, Req, _Db) -> - {selector, Style, get_selector_and_fields(Req)}; + {selector, Style, get_selector_and_fields(Req)}; configure_filter("_design", Style, _Req, _Db) -> {design_docs, Style}; configure_filter("_view", Style, Req, Db) -> ViewName = get_view_qs(Req), - if ViewName /= "" -> ok; true -> - throw({bad_request, "`view` filter parameter is not provided."}) + if + ViewName /= "" -> ok; + true -> throw({bad_request, "`view` filter parameter is not provided."}) end, ViewNameParts = string:tokens(ViewName, "/"), case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of @@ -196,10 +213,9 @@ configure_filter(FilterName, Style, Req, Db) -> true -> DIR = fabric_util:doc_id_and_rev(DDoc), {fetch, custom, Style, Req, DIR, FName}; - false-> + false -> {custom, Style, Req, DDoc, FName} end; - [] -> {default, Style}; _Else -> @@ -207,8 +223,7 @@ configure_filter(FilterName, Style, Req, Db) -> throw({bad_request, Msg}) end. - -filter(Db, #full_doc_info{}=FDI, Filter) -> +filter(Db, #full_doc_info{} = FDI, Filter) -> filter(Db, couch_doc:to_doc_info(FDI), Filter); filter(_Db, DocInfo, {default, Style}) -> apply_style(DocInfo, Style); @@ -221,8 +236,10 @@ filter(_Db, DocInfo, {doc_ids, Style, DocIds}) -> end; filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) -> Docs = open_revs(Db, DocInfo, Style), - Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, [])) - || Doc <- Docs], + Passes = [ + mango_selector:match(Selector, couch_doc:to_json_obj(Doc, [])) + || Doc <- Docs + ], filter_revs(Passes, Docs); filter(_Db, DocInfo, {design_docs, Style}) -> case DocInfo#doc_info.id of @@ -236,15 +253,15 @@ filter(Db, DocInfo, {view, Style, DDoc, VName}) -> {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs), filter_revs(Passes, Docs); filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) -> - Req = case Req0 of - {json_req, _} -> Req0; - #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)} - end, + Req = + case Req0 of + {json_req, _} -> Req0; + #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)} + end, Docs = open_revs(Db, DocInfo, Style), {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs), filter_revs(Passes, Docs). - get_view_qs({json_req, {Props}}) -> {Query} = couch_util:get_value(<<"query">>, Props, {[]}), binary_to_list(couch_util:get_value(<<"view">>, Query, "")); @@ -253,42 +270,43 @@ get_view_qs(Req) -> get_doc_ids({json_req, {Props}}) -> check_docids(couch_util:get_value(<<"doc_ids">>, Props)); -get_doc_ids(#httpd{method='POST'}=Req) -> +get_doc_ids(#httpd{method = 'POST'} = Req) -> couch_httpd:validate_ctype(Req, "application/json"), {Props} = couch_httpd:json_body_obj(Req), check_docids(couch_util:get_value(<<"doc_ids">>, Props)); -get_doc_ids(#httpd{method='GET'}=Req) -> +get_doc_ids(#httpd{method = 'GET'} = Req) -> DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")), check_docids(DocIds); get_doc_ids(_) -> throw({bad_request, no_doc_ids_provided}). 
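For reference, the filter shapes configure_filter/4 above dispatches on; the first four are built in, the last resolves a filter function from a design document:
```
%% ?filter=_doc_ids    doc ids from the query string (GET) or POST body
%% ?filter=_selector   mango selector (+ optional fields) in a POST body
%% ?filter=_design     only _design/* docs; takes no arguments
%% ?filter=_view&view=ddocname/viewname   a map function used as a filter
%% ?filter=ddocname/filtername            a custom filter function
```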
- get_selector_and_fields({json_req, {Props}}) -> Selector = check_selector(couch_util:get_value(<<"selector">>, Props)), Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)), {Selector, Fields}; -get_selector_and_fields(#httpd{method='POST'}=Req) -> +get_selector_and_fields(#httpd{method = 'POST'} = Req) -> couch_httpd:validate_ctype(Req, "application/json"), - get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)}); + get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)}); get_selector_and_fields(_) -> throw({bad_request, "Selector must be specified in POST payload"}). - check_docids(DocIds) when is_list(DocIds) -> - lists:foreach(fun - (DocId) when not is_binary(DocId) -> - Msg = "`doc_ids` filter parameter is not a list of doc ids.", - throw({bad_request, Msg}); - (_) -> ok - end, DocIds), + lists:foreach( + fun + (DocId) when not is_binary(DocId) -> + Msg = "`doc_ids` filter parameter is not a list of doc ids.", + throw({bad_request, Msg}); + (_) -> + ok + end, + DocIds + ), DocIds; check_docids(_) -> Msg = "`doc_ids` filter parameter is not a list of doc ids.", throw({bad_request, Msg}). - -check_selector(Selector={_}) -> +check_selector(Selector = {_}) -> try mango_selector:normalize(Selector) catch @@ -299,7 +317,6 @@ check_selector(Selector={_}) -> check_selector(_Selector) -> throw({bad_request, "Selector error: expected a JSON object"}). - check_fields(nil) -> nil; check_fields(Fields) when is_list(Fields) -> @@ -314,7 +331,6 @@ check_fields(Fields) when is_list(Fields) -> check_fields(_Fields) -> throw({bad_request, "Selector error: fields must be JSON array"}). - open_ddoc(Db, DDocId) -> DbName = couch_db:name(Db), case couch_db:is_clustered(Db) of @@ -330,39 +346,38 @@ open_ddoc(Db, DDocId) -> end end. - -check_member_exists(#doc{body={Props}}, Path) -> +check_member_exists(#doc{body = {Props}}, Path) -> couch_util:get_nested_json_value({Props}, Path). - -apply_style(#doc_info{revs=Revs}, main_only) -> - [#rev_info{rev=Rev} | _] = Revs, +apply_style(#doc_info{revs = Revs}, main_only) -> + [#rev_info{rev = Rev} | _] = Revs, [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}]; -apply_style(#doc_info{revs=Revs}, all_docs) -> - [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs]. - +apply_style(#doc_info{revs = Revs}, all_docs) -> + [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev = R} <- Revs]. open_revs(Db, DocInfo, Style) -> - DocInfos = case Style of - main_only -> [DocInfo]; - all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs] - end, + DocInfos = + case Style of + main_only -> [DocInfo]; + all_docs -> [DocInfo#doc_info{revs = [R]} || R <- DocInfo#doc_info.revs] + end, OpenOpts = [deleted, conflicts], % Relying on list comprehensions to silence errors OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos], [Doc || {ok, Doc} <- OpenResults]. - filter_revs(Passes, Docs) -> - lists:flatmap(fun - ({true, #doc{revs={RevPos, [RevId | _]}}}) -> - RevStr = couch_doc:rev_to_str({RevPos, RevId}), - Change = {[{<<"rev">>, RevStr}]}, - [Change]; - (_) -> - [] - end, lists:zip(Passes, Docs)). - + lists:flatmap( + fun + ({true, #doc{revs = {RevPos, [RevId | _]}}}) -> + RevStr = couch_doc:rev_to_str({RevPos, RevId}), + Change = {[{<<"rev">>, RevStr}]}, + [Change]; + (_) -> + [] + end, + lists:zip(Passes, Docs) + ). 
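get_selector_and_fields/1 above accepts either a ready-made {json_req, ...} EJSON object or a POST #httpd{} request that it first converts into one; the selector is then normalized by mango_selector:normalize/1 and the field list validated on the mango side. A sketch of the EJSON shape it parses, with illustrative values:
```
JsonReq = {json_req, {[
    {<<"selector">>, {[{<<"type">>, <<"user">>}]}},
    {<<"fields">>, [<<"_id">>, <<"type">>]}
]}}.
```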
get_changes_timeout(Args, Callback) -> #changes_args{ @@ -371,29 +386,30 @@ get_changes_timeout(Args, Callback) -> feed = ResponseType } = Args, DefaultTimeout = chttpd_util:get_chttpd_config_integer( - "changes_timeout", 60000), + "changes_timeout", 60000 + ), case Heartbeat of - undefined -> - case Timeout of undefined -> - {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end}; - infinity -> - {infinity, fun(UserAcc) -> {stop, UserAcc} end}; + case Timeout of + undefined -> + {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end}; + infinity -> + {infinity, fun(UserAcc) -> {stop, UserAcc} end}; + _ -> + {lists:min([DefaultTimeout, Timeout]), fun(UserAcc) -> {stop, UserAcc} end} + end; + true -> + {DefaultTimeout, fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}; _ -> - {lists:min([DefaultTimeout, Timeout]), - fun(UserAcc) -> {stop, UserAcc} end} - end; - true -> - {DefaultTimeout, - fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}; - _ -> - {lists:min([DefaultTimeout, Heartbeat]), - fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end} + {lists:min([DefaultTimeout, Heartbeat]), fun(UserAcc) -> + {ok, Callback(timeout, ResponseType, UserAcc)} + end} end. -start_sending_changes(_Callback, UserAcc, ResponseType) - when ResponseType =:= "continuous" - orelse ResponseType =:= "eventsource" -> +start_sending_changes(_Callback, UserAcc, ResponseType) when + ResponseType =:= "continuous" orelse + ResponseType =:= "eventsource" +-> UserAcc; start_sending_changes(Callback, UserAcc, ResponseType) -> Callback(start, ResponseType, UserAcc). @@ -421,8 +437,8 @@ build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) - conflicts = Conflicts, timeout = Timeout, timeout_fun = TimeoutFun, - aggregation_results=[], - aggregation_kvs=[] + aggregation_results = [], + aggregation_kvs = [] }. send_changes(Acc, Dir, FirstRound) -> @@ -440,30 +456,35 @@ send_changes(Acc, Dir, FirstRound) -> couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts) end. - can_optimize(true, {doc_ids, _Style, DocIds}) -> - MaxDocIds = config:get_integer("couchdb", - "changes_doc_ids_optimization_threshold", 100), - if length(DocIds) =< MaxDocIds -> - {true, fun send_changes_doc_ids/6}; - true -> - false + MaxDocIds = config:get_integer( + "couchdb", + "changes_doc_ids_optimization_threshold", + 100 + ), + if + length(DocIds) =< MaxDocIds -> + {true, fun send_changes_doc_ids/6}; + true -> + false end; can_optimize(true, {design_docs, _Style}) -> {true, fun send_changes_design_docs/6}; can_optimize(_, _) -> false. - send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) -> Results = couch_db:get_full_doc_infos(Db, DocIds), - FullInfos = lists:foldl(fun - (#full_doc_info{}=FDI, Acc) -> [FDI | Acc]; - (not_found, Acc) -> Acc - end, [], Results), + FullInfos = lists:foldl( + fun + (#full_doc_info{} = FDI, Acc) -> [FDI | Acc]; + (not_found, Acc) -> Acc + end, + [], + Results + ), send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0). - send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) -> FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end, Opts = [ @@ -474,49 +495,62 @@ send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) -> {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts), send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0). 
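Spelling out the heartbeat/timeout matrix in get_changes_timeout/2 above (DefaultTimeout is the chttpd "changes_timeout" setting, 60000 ms by default; 30000 and 5000 are illustrative values):
```
%% heartbeat    timeout    -> wait (ms)            on expiry
%% undefined    undefined  -> 60000                stop the feed
%% undefined    infinity   -> infinity             stop the feed
%% undefined    30000      -> min(60000, 30000)    stop the feed
%% true         (ignored)  -> 60000                send heartbeat, keep waiting
%% 5000         (ignored)  -> min(60000, 5000)     send heartbeat, keep waiting
```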
- send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) -> - FoldFun = case Dir of - fwd -> fun lists:foldl/3; - rev -> fun lists:foldr/3 - end, - GreaterFun = case Dir of - fwd -> fun(A, B) -> A > B end; - rev -> fun(A, B) -> A =< B end - end, - DocInfos = lists:foldl(fun(FDI, Acc) -> - DI = couch_doc:to_doc_info(FDI), - case GreaterFun(DI#doc_info.high_seq, StartSeq) of - true -> [DI | Acc]; - false -> Acc - end - end, [], FullDocInfos), - SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos), - FinalAcc = try - FoldFun(fun(DocInfo, Acc) -> - case Fun(DocInfo, Acc) of - {ok, NewAcc} -> - NewAcc; - {stop, NewAcc} -> - throw({stop, NewAcc}) + FoldFun = + case Dir of + fwd -> fun lists:foldl/3; + rev -> fun lists:foldr/3 + end, + GreaterFun = + case Dir of + fwd -> fun(A, B) -> A > B end; + rev -> fun(A, B) -> A =< B end + end, + DocInfos = lists:foldl( + fun(FDI, Acc) -> + DI = couch_doc:to_doc_info(FDI), + case GreaterFun(DI#doc_info.high_seq, StartSeq) of + true -> [DI | Acc]; + false -> Acc end - end, Acc0, SortedDocInfos) - catch - {stop, Acc} -> Acc - end, + end, + [], + FullDocInfos + ), + SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos), + FinalAcc = + try + FoldFun( + fun(DocInfo, Acc) -> + case Fun(DocInfo, Acc) of + {ok, NewAcc} -> + NewAcc; + {stop, NewAcc} -> + throw({stop, NewAcc}) + end + end, + Acc0, + SortedDocInfos + ) + catch + {stop, Acc} -> Acc + end, case Dir of fwd -> - FinalAcc0 = case element(1, FinalAcc) of - changes_acc -> % we came here via couch_http or internal call - FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)}; - fabric_changes_acc -> % we came here via chttpd / fabric / rexi - FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)} - end, + FinalAcc0 = + case element(1, FinalAcc) of + % we came here via couch_http or internal call + changes_acc -> + FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)}; + % we came here via chttpd / fabric / rexi + fabric_changes_acc -> + FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)} + end, {ok, FinalAcc0}; - rev -> {ok, FinalAcc} + rev -> + {ok, FinalAcc} end. 
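send_lookup_changes/6 above escapes lists:foldl/foldr early by throwing {stop, Acc} and catching it around the fold, since the stdlib folds have no stop signal of their own. The pattern in isolation (names are illustrative):
```
%% Fold until Pred matches, keeping what came before (head-first).
take_until(Pred, List) ->
    try
        lists:foldl(
            fun(X, Acc) ->
                case Pred(X) of
                    true -> throw({stop, Acc});
                    false -> [X | Acc]
                end
            end,
            [],
            List
        )
    catch
        throw:{stop, Acc} -> Acc
    end.
%% take_until(fun(X) -> X > 3 end, [1, 2, 3, 4, 5]) =:= [3, 2, 1].
```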
- keep_sending_changes(Args, Acc0, FirstRound) -> #changes_args{ feed = ResponseType, @@ -527,36 +561,44 @@ keep_sending_changes(Args, Acc0, FirstRound) -> {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound), #changes_acc{ - db = Db, callback = Callback, - timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq, - prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit + db = Db, + callback = Callback, + timeout = Timeout, + timeout_fun = TimeoutFun, + seq = EndSeq, + prepend = Prepend2, + user_acc = UserAcc2, + limit = NewLimit } = maybe_upgrade_changes_acc(ChangesAcc), couch_db:close(Db), - if Limit > NewLimit, ResponseType == "longpoll" -> - end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType); - true -> - case wait_updated(Timeout, TimeoutFun, UserAcc2) of - {updated, UserAcc4} -> - DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions], - case couch_db:open(couch_db:name(Db), DbOptions1) of - {ok, Db2} -> - ?MODULE:keep_sending_changes( - Args#changes_args{limit=NewLimit}, - ChangesAcc#changes_acc{ - db = Db2, - user_acc = UserAcc4, - seq = EndSeq, - prepend = Prepend2, - timeout = Timeout, - timeout_fun = TimeoutFun}, - false); - _Else -> - end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType) - end; - {stop, UserAcc4} -> - end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType) - end + if + Limit > NewLimit, ResponseType == "longpoll" -> + end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType); + true -> + case wait_updated(Timeout, TimeoutFun, UserAcc2) of + {updated, UserAcc4} -> + DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions], + case couch_db:open(couch_db:name(Db), DbOptions1) of + {ok, Db2} -> + ?MODULE:keep_sending_changes( + Args#changes_args{limit = NewLimit}, + ChangesAcc#changes_acc{ + db = Db2, + user_acc = UserAcc4, + seq = EndSeq, + prepend = Prepend2, + timeout = Timeout, + timeout_fun = TimeoutFun + }, + false + ); + _Else -> + end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType) + end; + {stop, UserAcc4} -> + end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType) + end end. 
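The termination test at the top of the reformatted if above is compact but load-bearing:
```
%% "Limit > NewLimit" holds only when send_changes/3 emitted at least
%% one row this round. A longpoll feed ends as soon as it has sent
%% something; otherwise the loop falls through to wait_updated/3,
%% blocks for the next db event, reopens the db, and recurses with
%% the reduced limit.
```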
end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) -> @@ -564,46 +606,59 @@ end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) -> changes_enumerator(Value, Acc) -> #changes_acc{ - filter = Filter, callback = Callback, prepend = Prepend, - user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db, - timeout = Timeout, timeout_fun = TimeoutFun + filter = Filter, + callback = Callback, + prepend = Prepend, + user_acc = UserAcc, + limit = Limit, + resp_type = ResponseType, + db = Db, + timeout = Timeout, + timeout_fun = TimeoutFun } = maybe_upgrade_changes_acc(Acc), Results0 = filter(Db, Value, Filter), Results = [Result || Result <- Results0, Result /= null], - Seq = case Value of - #full_doc_info{} -> - Value#full_doc_info.update_seq; - #doc_info{} -> - Value#doc_info.high_seq - end, - Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end, + Seq = + case Value of + #full_doc_info{} -> + Value#full_doc_info.update_seq; + #doc_info{} -> + Value#doc_info.high_seq + end, + Go = + if + (Limit =< 1) andalso Results =/= [] -> stop; + true -> ok + end, case Results of - [] -> - {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc), - case Done of - stop -> - {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}; - ok -> - {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}} - end; - _ -> - if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" -> - ChangesRow = changes_row(Results, Value, Acc), - UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc), - reset_heartbeat(), - {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}}; - true -> - ChangesRow = changes_row(Results, Value, Acc), - UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc), - reset_heartbeat(), - {Go, Acc#changes_acc{ - seq = Seq, prepend = <<",\n">>, - user_acc = UserAcc2, limit = Limit - 1}} - end + [] -> + {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc), + case Done of + stop -> + {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}; + ok -> + {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}} + end; + _ -> + if + ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" -> + ChangesRow = changes_row(Results, Value, Acc), + UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc), + reset_heartbeat(), + {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}}; + true -> + ChangesRow = changes_row(Results, Value, Acc), + UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc), + reset_heartbeat(), + {Go, Acc#changes_acc{ + seq = Seq, + prepend = <<",\n">>, + user_acc = UserAcc2, + limit = Limit - 1 + }} + end end. - - changes_row(Results, #full_doc_info{} = FDI, Acc) -> changes_row(Results, couch_doc:to_doc_info(FDI), Acc); changes_row(Results, DocInfo, Acc0) -> @@ -611,26 +666,27 @@ changes_row(Results, DocInfo, Acc0) -> #doc_info{ id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _] } = DocInfo, - {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++ - deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)}. + { + [{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++ + deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc) + }. 
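One subtlety in changes_enumerator/2 above: Go is computed before the current row is emitted, which is why the stop test compares against 1 rather than 0:
```
%% With Limit =:= 1 and a non-empty result, Go =:= stop: the enumerator
%% still sends this row (storing limit = Limit - 1) and only then stops
%% the fold, so exactly `limit` rows reach the callback.
```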
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) -> +maybe_get_changes_doc(Value, #changes_acc{include_docs = true} = Acc) -> #changes_acc{ db = Db, doc_options = DocOpts, conflicts = Conflicts, filter = Filter } = Acc, - Opts = case Conflicts of - true -> [deleted, conflicts]; - false -> [deleted] - end, + Opts = + case Conflicts of + true -> [deleted, conflicts]; + false -> [deleted] + end, load_doc(Db, Value, Opts, DocOpts, Filter); - maybe_get_changes_doc(_Value, _Acc) -> []. - load_doc(Db, Value, Opts, DocOpts, Filter) -> case couch_index_util:load_doc(Db, Value, Opts) of null -> @@ -639,68 +695,66 @@ load_doc(Db, Value, Opts, DocOpts, Filter) -> [{doc, doc_to_json(Doc, DocOpts, Filter)}] end. - -doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}}) - when Fields =/= nil -> +doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}}) when + Fields =/= nil +-> mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields); doc_to_json(Doc, DocOpts, _Filter) -> couch_doc:to_json_obj(Doc, DocOpts). - deleted_item(true) -> [{<<"deleted">>, true}]; deleted_item(_) -> []. % waits for a updated msg, if there are multiple msgs, collects them. wait_updated(Timeout, TimeoutFun, UserAcc) -> receive - updated -> - get_rest_updated(UserAcc); - deleted -> - {stop, UserAcc} + updated -> + get_rest_updated(UserAcc); + deleted -> + {stop, UserAcc} after Timeout -> {Go, UserAcc2} = TimeoutFun(UserAcc), case Go of - ok -> - ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2); - stop -> - {stop, UserAcc2} + ok -> + ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2); + stop -> + {stop, UserAcc2} end end. get_rest_updated(UserAcc) -> receive - updated -> - get_rest_updated(UserAcc) + updated -> + get_rest_updated(UserAcc) after 0 -> {updated, UserAcc} end. reset_heartbeat() -> case get(last_changes_heartbeat) of - undefined -> - ok; - _ -> - put(last_changes_heartbeat, os:timestamp()) + undefined -> + ok; + _ -> + put(last_changes_heartbeat, os:timestamp()) end. maybe_heartbeat(Timeout, TimeoutFun, Acc) -> Before = get(last_changes_heartbeat), case Before of - undefined -> - {ok, Acc}; - _ -> - Now = os:timestamp(), - case timer:now_diff(Now, Before) div 1000 >= Timeout of - true -> - Acc2 = TimeoutFun(Acc), - put(last_changes_heartbeat, Now), - Acc2; - false -> - {ok, Acc} - end + undefined -> + {ok, Acc}; + _ -> + Now = os:timestamp(), + case timer:now_diff(Now, Before) div 1000 >= Timeout of + true -> + Acc2 = TimeoutFun(Acc), + put(last_changes_heartbeat, Now), + Acc2; + false -> + {ok, Acc} + end end. - maybe_upgrade_changes_acc(#changes_acc{} = Acc) -> Acc; maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 -> diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl index cfcc2a481..59d692058 100644 --- a/src/couch/src/couch_compress.erl +++ b/src/couch/src/couch_compress.erl @@ -25,21 +25,19 @@ -define(TERM_PREFIX, 131). -define(COMPRESSED_TERM_PREFIX, 131, 80). - get_compression_method() -> case config:get("couchdb", "file_compression") of - undefined -> - ?DEFAULT_COMPRESSION; - Method1 -> - case string:tokens(Method1, "_") of - [Method] -> - list_to_existing_atom(Method); - [Method, Level] -> - {list_to_existing_atom(Method), list_to_integer(Level)} - end + undefined -> + ?DEFAULT_COMPRESSION; + Method1 -> + case string:tokens(Method1, "_") of + [Method] -> + list_to_existing_atom(Method); + [Method, Level] -> + {list_to_existing_atom(Method), list_to_integer(Level)} + end end. 
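get_compression_method/0 above splits the "file_compression" value on "_" so a compression level can ride along. The same parse as a standalone sketch; list_to_existing_atom presumes the method atoms (snappy, deflate) are already known to the VM, which holds inside CouchDB:
```
parse_compression(Value) ->
    case string:tokens(Value, "_") of
        [Method] ->
            list_to_existing_atom(Method);
        [Method, Level] ->
            {list_to_existing_atom(Method), list_to_integer(Level)}
    end.
%% parse_compression("snappy")    =:= snappy
%% parse_compression("deflate_6") =:= {deflate, 6}
```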
- compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) -> Bin; compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) -> @@ -57,11 +55,11 @@ compress(Term, snappy) -> try {ok, CompressedBin} = snappy:compress(Bin), <<?SNAPPY_PREFIX, CompressedBin/binary>> - catch exit:snappy_nif_not_loaded -> - Bin + catch + exit:snappy_nif_not_loaded -> + Bin end. - decompress(<<?SNAPPY_PREFIX, Rest/binary>>) -> {ok, TermBin} = snappy:decompress(Rest), binary_to_term(TermBin); @@ -70,7 +68,6 @@ decompress(<<?TERM_PREFIX, _/binary>> = Bin) -> decompress(_) -> error(invalid_compression). - is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) -> Method =:= snappy; is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) -> @@ -84,7 +81,6 @@ is_compressed(Term, _Method) when not is_binary(Term) -> is_compressed(_, _) -> error(invalid_compression). - uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) -> {ok, Size} = snappy:uncompressed_length(Rest), Size; diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index fdcf23e1b..18ef9c998 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -129,21 +129,22 @@ new_revid/1 ]). - -export([ start_link/4 ]). - -include_lib("couch/include/couch_db.hrl"). -include("couch_db_int.hrl"). -define(DBNAME_REGEX, - "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex - "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end + % use the stock CouchDB regex + "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" + % but allow an optional shard timestamp at the end + "(\\.[0-9]{10,})?$" ). -define(DEFAULT_COMPRESSIBLE_TYPES, - "text/*, application/javascript, application/json, application/xml"). + "text/*, application/javascript, application/json, application/xml" +). start_link(Engine, DbName, Filepath, Options) -> Arg = {Engine, DbName, Filepath, Options}, @@ -170,10 +171,10 @@ open(DbName, Options) -> close(Db), throw(Error) end; - Else -> Else + Else -> + Else end. - reopen(#db{} = Db) -> % We could have just swapped out the storage engine % for this database during a compaction so we just @@ -184,7 +185,6 @@ reopen(#db{} = Db) -> close(Db) end. - % You shouldn't call this. Its part of the ref counting between % couch_server and couch_db instances. incref(#db{} = Db) -> @@ -200,7 +200,6 @@ clustered_db(DbName, Options) when is_list(Options) -> security = SecProps, options = [{props, Props}] }}; - clustered_db(DbName, #user_ctx{} = UserCtx) -> clustered_db(DbName, [{user_ctx, UserCtx}]). @@ -231,7 +230,7 @@ close(#db{} = Db) -> close(?OLD_DB_REC) -> ok. -is_idle(#db{compactor_pid=nil} = Db) -> +is_idle(#db{compactor_pid = nil} = Db) -> monitored_by(Db) == []; is_idle(_Db) -> false. @@ -245,20 +244,19 @@ monitored_by(Db) -> [] end. - -monitor(#db{main_pid=MainPid}) -> +monitor(#db{main_pid = MainPid}) -> erlang:monitor(process, MainPid). start_compact(#db{} = Db) -> gen_server:call(Db#db.main_pid, start_compact). -cancel_compact(#db{main_pid=Pid}) -> +cancel_compact(#db{main_pid = Pid}) -> gen_server:call(Pid, cancel_compact). wait_for_compaction(Db) -> wait_for_compaction(Db, infinity). -wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) -> +wait_for_compaction(#db{main_pid = Pid} = Db, Timeout) -> Start = os:timestamp(), case gen_server:call(Pid, compactor_pid) of CPid when is_pid(CPid) -> @@ -280,7 +278,7 @@ wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) -> end. 
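The split ?DBNAME_REGEX string above concatenates to the same pattern as before; the optional trailing group is what admits shard timestamp suffixes. Checking it directly with re (the names are illustrative):
```
Re = "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*(\\.[0-9]{10,})?$",
match   = re:run("db.1636329600", Re, [{capture, none}]),
nomatch = re:run("MyDB", Re, [{capture, none}]),   % must start a-z, lowercase only
nomatch = re:run("db.123", Re, [{capture, none}]). % timestamp needs 10+ digits
```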
delete_doc(Db, Id, Revisions) -> - DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions], + DeletedDocs = [#doc{id = Id, revs = [Rev], deleted = true} || Rev <- Revisions], {ok, [Result]} = update_docs(Db, DeletedDocs, []), {ok, Result}. @@ -290,50 +288,55 @@ open_doc(Db, IdOrDocInfo) -> open_doc(Db, Id, Options) -> increment_stat(Db, [couchdb, database_reads]), case open_doc_int(Db, Id, Options) of - {ok, #doc{deleted=true}=Doc} -> - case lists:member(deleted, Options) of - true -> - apply_open_options({ok, Doc},Options); - false -> - {not_found, deleted} - end; - Else -> - apply_open_options(Else,Options) + {ok, #doc{deleted = true} = Doc} -> + case lists:member(deleted, Options) of + true -> + apply_open_options({ok, Doc}, Options); + false -> + {not_found, deleted} + end; + Else -> + apply_open_options(Else, Options) end. -apply_open_options({ok, Doc},Options) -> - apply_open_options2(Doc,Options); -apply_open_options(Else,_Options) -> +apply_open_options({ok, Doc}, Options) -> + apply_open_options2(Doc, Options); +apply_open_options(Else, _Options) -> Else. -apply_open_options2(Doc,[]) -> +apply_open_options2(Doc, []) -> {ok, Doc}; -apply_open_options2(#doc{atts=Atts0,revs=Revs}=Doc, - [{atts_since, PossibleAncestors}|Rest]) -> +apply_open_options2( + #doc{atts = Atts0, revs = Revs} = Doc, + [{atts_since, PossibleAncestors} | Rest] +) -> RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors), - Atts = lists:map(fun(Att) -> - [AttPos, Data] = couch_att:fetch([revpos, data], Att), - if AttPos > RevPos -> couch_att:store(data, Data, Att); - true -> couch_att:store(data, stub, Att) - end - end, Atts0), - apply_open_options2(Doc#doc{atts=Atts}, Rest); + Atts = lists:map( + fun(Att) -> + [AttPos, Data] = couch_att:fetch([revpos, data], Att), + if + AttPos > RevPos -> couch_att:store(data, Data, Att); + true -> couch_att:store(data, stub, Att) + end + end, + Atts0 + ), + apply_open_options2(Doc#doc{atts = Atts}, Rest); apply_open_options2(Doc, [ejson_body | Rest]) -> apply_open_options2(couch_doc:with_ejson_body(Doc), Rest); -apply_open_options2(Doc,[_|Rest]) -> - apply_open_options2(Doc,Rest). - +apply_open_options2(Doc, [_ | Rest]) -> + apply_open_options2(Doc, Rest). find_ancestor_rev_pos({_, []}, _AttsSinceRevs) -> 0; find_ancestor_rev_pos(_DocRevs, []) -> 0; -find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) -> +find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) -> case lists:member({RevPos, RevId}, AttsSinceRevs) of - true -> - RevPos; - false -> - find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs) + true -> + RevPos; + false -> + find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs) end. 
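The atts_since handling above backs the replicator's attachment optimization: attachments the requester already had at a known ancestor revision come back as stubs, and only newer ones carry data. A sketch against an open Db handle; the doc id and rev are illustrative:
```
%% Attachments with revpos =< 1 are returned with data = stub.
Ancestor = {1, <<"abc">>},
{ok, Doc} = couch_db:open_doc(Db, <<"docid">>, [{atts_since, [Ancestor]}]).
```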
open_doc_revs(Db, Id, Revs, Options) -> @@ -350,39 +353,52 @@ get_missing_revs(Db, IdRevsList) -> find_missing([], []) -> []; -find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo]) - when is_record(FullInfo, full_doc_info) -> +find_missing([{Id, Revs} | RestIdRevs], [FullInfo | RestLookupInfo]) when + is_record(FullInfo, full_doc_info) +-> case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of - [] -> - find_missing(RestIdRevs, RestLookupInfo); - MissingRevs -> - #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo), - LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo], - % Find the revs that are possible parents of this rev - PossibleAncestors = - lists:foldl(fun({LeafPos, LeafRevId}, Acc) -> - % this leaf is a "possible ancenstor" of the missing - % revs if this LeafPos lessthan any of the missing revs - case lists:any(fun({MissingPos, _}) -> - LeafPos < MissingPos end, MissingRevs) of - true -> - [{LeafPos, LeafRevId} | Acc]; - false -> - Acc - end - end, [], LeafRevs), - [{Id, MissingRevs, PossibleAncestors} | - find_missing(RestIdRevs, RestLookupInfo)] + [] -> + find_missing(RestIdRevs, RestLookupInfo); + MissingRevs -> + #doc_info{revs = RevsInfo} = couch_doc:to_doc_info(FullInfo), + LeafRevs = [Rev || #rev_info{rev = Rev} <- RevsInfo], + % Find the revs that are possible parents of this rev + PossibleAncestors = + lists:foldl( + fun({LeafPos, LeafRevId}, Acc) -> + % this leaf is a "possible ancenstor" of the missing + % revs if this LeafPos lessthan any of the missing revs + case + lists:any( + fun({MissingPos, _}) -> + LeafPos < MissingPos + end, + MissingRevs + ) + of + true -> + [{LeafPos, LeafRevId} | Acc]; + false -> + Acc + end + end, + [], + LeafRevs + ), + [ + {Id, MissingRevs, PossibleAncestors} + | find_missing(RestIdRevs, RestLookupInfo) + ] end; -find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) -> +find_missing([{Id, Revs} | RestIdRevs], [not_found | RestLookupInfo]) -> [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)]. get_doc_info(Db, Id) -> case get_full_doc_info(Db, Id) of - #full_doc_info{} = FDI -> - {ok, couch_doc:to_doc_info(FDI)}; - Else -> - Else + #full_doc_info{} = FDI -> + {ok, couch_doc:to_doc_info(FDI)}; + Else -> + Else end. get_full_doc_info(Db, Id) -> @@ -396,27 +412,39 @@ purge_docs(Db, IdRevs) -> purge_docs(Db, IdRevs, []). -spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) -> - {ok, [Reply]} when + {ok, [Reply]} +when UUId :: binary(), Id :: binary() | list(), Rev :: {non_neg_integer(), binary()}, PurgeOption :: interactive_edit | replicated_changes, Reply :: {ok, []} | {ok, [Rev]}. 
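In `find_missing/2` above, a leaf counts as a possible ancestor when its position precedes at least one of the missing revisions. Pulled out as a self-contained function (hypothetical module), the fold reduces to:

```erlang
-module(missing_revs_demo).
-export([possible_ancestors/2]).

%% LeafRevs and MissingRevs are both lists of {Pos, RevId} pairs.
possible_ancestors(LeafRevs, MissingRevs) ->
    lists:foldl(
        fun({LeafPos, LeafRevId}, Acc) ->
            Earlier = lists:any(
                fun({MissingPos, _}) -> LeafPos < MissingPos end,
                MissingRevs
            ),
            case Earlier of
                true -> [{LeafPos, LeafRevId} | Acc];
                false -> Acc
            end
        end,
        [],
        LeafRevs
    ).
```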
purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) -> - UUIDsIdsRevs2 = [{UUID, couch_util:to_binary(Id), Revs} - || {UUID, Id, Revs} <- UUIDsIdsRevs], + UUIDsIdsRevs2 = [ + {UUID, couch_util:to_binary(Id), Revs} + || {UUID, Id, Revs} <- UUIDsIdsRevs + ], % Check here if any UUIDs already exist when % we're not replicating purge infos IsRepl = lists:member(replicated_changes, Options), - if IsRepl -> ok; true -> - UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2], - lists:foreach(fun(Resp) -> - if Resp == not_found -> ok; true -> - Fmt = "Duplicate purge info UIUD: ~s", - Reason = io_lib:format(Fmt, [element(2, Resp)]), - throw({badreq, Reason}) - end - end, get_purge_infos(Db, UUIDs)) + if + IsRepl -> + ok; + true -> + UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2], + lists:foreach( + fun(Resp) -> + if + Resp == not_found -> + ok; + true -> + Fmt = "Duplicate purge info UIUD: ~s", + Reason = io_lib:format(Fmt, [element(2, Resp)]), + throw({badreq, Reason}) + end + end, + get_purge_infos(Db, UUIDs) + ) end, increment_stat(Db, [couchdb, database_purges]), gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}). @@ -430,7 +458,6 @@ purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) -> get_purge_infos(Db, UUIDs) -> couch_db_engine:load_purge_infos(Db, UUIDs). - get_minimum_purge_seq(#db{} = Db) -> PurgeSeq = couch_db_engine:get_purge_seq(Db), OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db), @@ -468,24 +495,31 @@ get_minimum_purge_seq(#db{} = Db) -> {start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")} ], {ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts), - FinalSeq = case MinIdxSeq < PurgeSeq - PurgeInfosLimit of - true -> MinIdxSeq; - false -> erlang:max(0, PurgeSeq - PurgeInfosLimit) - end, + FinalSeq = + case MinIdxSeq < PurgeSeq - PurgeInfosLimit of + true -> MinIdxSeq; + false -> erlang:max(0, PurgeSeq - PurgeInfosLimit) + end, % Log a warning if we've got a purge sequence exceeding the % configured threshold. - if FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> ok; true -> - Fmt = "The purge sequence for '~s' exceeds configured threshold", - couch_log:warning(Fmt, [couch_db:name(Db)]) + if + FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> + ok; + true -> + Fmt = "The purge sequence for '~s' exceeds configured threshold", + couch_log:warning(Fmt, [couch_db:name(Db)]) end, FinalSeq. 
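`get_minimum_purge_seq/1` above clamps the oldest retained purge sequence: it follows the slowest index checkpoint, but never keeps more than `purge_infos_limit` entries behind the current purge sequence. The arithmetic in isolation (hypothetical module):

```erlang
-module(purge_seq_demo).
-export([minimum_purge_seq/3]).

minimum_purge_seq(MinIdxSeq, PurgeSeq, PurgeInfosLimit) ->
    case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
        true -> MinIdxSeq;
        false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
    end.
```

For example, `minimum_purge_seq(80, 100, 50)` returns 50: the checkpoint at 80 is well within the limit, so only the last 50 purge infos need to be kept.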
- purge_client_exists(DbName, DocId, Props) -> % Warn about clients that have not updated their purge % checkpoints in the last "index_lag_warn_seconds" LagWindow = config:get_integer( - "purge", "index_lag_warn_seconds", 86400), % Default 24 hours + % Default 24 hours + "purge", + "index_lag_warn_seconds", + 86400 + ), {Mega, Secs, _} = os:timestamp(), NowSecs = Mega * 1000000 + Secs, @@ -493,43 +527,50 @@ purge_client_exists(DbName, DocId, Props) -> try Exists = couch_db_plugin:is_valid_purge_client(DbName, Props), - if not Exists -> ok; true -> - Updated = couch_util:get_value(<<"updated_on">>, Props), - if is_integer(Updated) and Updated > LagThreshold -> ok; true -> - Diff = NowSecs - Updated, - Fmt1 = "Purge checkpoint '~s' not updated in ~p seconds - in database ~p", - couch_log:error(Fmt1, [DocId, Diff, DbName]) - end + if + not Exists -> + ok; + true -> + Updated = couch_util:get_value(<<"updated_on">>, Props), + if + is_integer(Updated) and Updated > LagThreshold -> + ok; + true -> + Diff = NowSecs - Updated, + Fmt1 = + "Purge checkpoint '~s' not updated in ~p seconds\n" + " in database ~p", + couch_log:error(Fmt1, [DocId, Diff, DbName]) + end end, Exists - catch _:_ -> - % If we fail to check for a client we have to assume that - % it exists. - Fmt2 = "Failed to check purge checkpoint using - document '~p' in database ~p", - couch_log:error(Fmt2, [DocId, DbName]), - true + catch + _:_ -> + % If we fail to check for a client we have to assume that + % it exists. + Fmt2 = + "Failed to check purge checkpoint using\n" + " document '~p' in database ~p", + couch_log:error(Fmt2, [DocId, DbName]), + true end. - -set_purge_infos_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 -> +set_purge_infos_limit(#db{main_pid = Pid} = Db, Limit) when Limit > 0 -> check_is_admin(Db), gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity); set_purge_infos_limit(_Db, _Limit) -> throw(invalid_purge_infos_limit). - get_after_doc_read_fun(#db{after_doc_read = Fun}) -> Fun. get_before_doc_update_fun(#db{before_doc_update = Fun}) -> Fun. -get_committed_update_seq(#db{committed_update_seq=Seq}) -> +get_committed_update_seq(#db{committed_update_seq = Seq}) -> Seq. -get_update_seq(#db{} = Db)-> +get_update_seq(#db{} = Db) -> couch_db_engine:get_update_seq(Db). get_user_ctx(#db{user_ctx = UserCtx}) -> @@ -537,13 +578,13 @@ get_user_ctx(#db{user_ctx = UserCtx}) -> get_user_ctx(?OLD_DB_REC = Db) -> ?OLD_DB_USER_CTX(Db). -get_purge_seq(#db{}=Db) -> +get_purge_seq(#db{} = Db) -> couch_db_engine:get_purge_seq(Db). -get_oldest_purge_seq(#db{}=Db) -> +get_oldest_purge_seq(#db{} = Db) -> couch_db_engine:get_oldest_purge_seq(Db). -get_purge_infos_limit(#db{}=Db) -> +get_purge_infos_limit(#db{} = Db) -> couch_db_engine:get_purge_infos_limit(Db). get_pid(#db{main_pid = Pid}) -> @@ -555,10 +596,10 @@ get_del_doc_count(Db) -> get_doc_count(Db) -> {ok, couch_db_engine:get_doc_count(Db)}. -get_uuid(#db{}=Db) -> +get_uuid(#db{} = Db) -> couch_db_engine:get_uuid(Db). -get_epochs(#db{}=Db) -> +get_epochs(#db{} = Db) -> Epochs = couch_db_engine:get_epochs(Db), validate_epochs(Epochs), Epochs. @@ -569,13 +610,13 @@ get_filepath(#db{filepath = FilePath}) -> get_instance_start_time(#db{instance_start_time = IST}) -> IST. -get_compacted_seq(#db{}=Db) -> +get_compacted_seq(#db{} = Db) -> couch_db_engine:get_compacted_seq(Db). get_compactor_pid(#db{compactor_pid = Pid}) -> Pid. 
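An aside on the lag test in `purge_client_exists/3` above, which this formatting pass leaves untouched: `and` binds tighter than `>` in Erlang, so `is_integer(Updated) and Updated > LagThreshold` parses as `(is_integer(Updated) and Updated) > LagThreshold`, which raises for any integer `Updated` and makes the clause fall through to the warning branch. A sketch of the presumably intended check, using short-circuiting `andalso` (hypothetical module):

```erlang
-module(purge_lag_demo).
-export([fresh/3]).

%% true when the checkpoint was updated within the lag window.
fresh(Updated, NowSecs, LagWindow) ->
    LagThreshold = NowSecs - LagWindow,
    is_integer(Updated) andalso Updated > LagThreshold.
```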
-get_compactor_pid_sync(#db{main_pid=Pid}) -> +get_compactor_pid_sync(#db{main_pid = Pid}) -> case gen_server:call(Pid, compactor_pid, infinity) of CPid when is_pid(CPid) -> CPid; @@ -594,18 +635,21 @@ get_db_info(Db) -> {ok, DelDocCount} = get_del_doc_count(Db), SizeInfo = couch_db_engine:get_size_info(Db), DiskVersion = couch_db_engine:get_disk_version(Db), - Uuid = case get_uuid(Db) of - undefined -> null; - Uuid0 -> Uuid0 - end, - CompactedSeq = case get_compacted_seq(Db) of - undefined -> null; - Else1 -> Else1 - end, - Props = case couch_db_engine:get_props(Db) of - undefined -> null; - Else2 -> {Else2} - end, + Uuid = + case get_uuid(Db) of + undefined -> null; + Uuid0 -> Uuid0 + end, + CompactedSeq = + case get_compacted_seq(Db) of + undefined -> null; + Else1 -> Else1 + end, + Props = + case couch_db_engine:get_props(Db) of + undefined -> null; + Else2 -> {Else2} + end, InfoList = [ {db_name, Name}, {engine, couch_db_engine:get_engine(Db)}, @@ -630,15 +674,15 @@ get_partition_info(#db{} = Db, Partition) when is_binary(Partition) -> get_partition_info(_Db, _Partition) -> throw({bad_request, <<"`partition` is not valid">>}). - get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) -> DDocId = couch_util:normalize_ddoc_id(DDocId0), DbName = mem3:dbname(ShardDbName), {_, Ref} = spawn_monitor(fun() -> exit(fabric:open_doc(DbName, DDocId, [])) end), - receive {'DOWN', Ref, _, _, Response} -> - Response + receive + {'DOWN', Ref, _, _, Response} -> + Response end; get_design_doc(#db{} = Db, DDocId0) -> DDocId = couch_util:normalize_ddoc_id(DDocId0), @@ -647,8 +691,9 @@ get_design_doc(#db{} = Db, DDocId0) -> get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) -> DbName = mem3:dbname(ShardDbName), {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end), - receive {'DOWN', Ref, _, _, Response} -> - Response + receive + {'DOWN', Ref, _, _, Response} -> + Response end; get_design_docs(#db{} = Db) -> FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end, @@ -659,47 +704,51 @@ get_design_doc_count(#db{} = Db) -> FoldFun = fun(_, Acc) -> {ok, Acc + 1} end, fold_design_docs(Db, FoldFun, 0, []). -check_is_admin(#db{user_ctx=UserCtx}=Db) -> +check_is_admin(#db{user_ctx = UserCtx} = Db) -> case is_admin(Db) of - true -> ok; + true -> + ok; false -> Reason = <<"You are not a db or server admin.">>, throw_security_error(UserCtx, Reason) end. -check_is_member(#db{user_ctx=UserCtx}=Db) -> +check_is_member(#db{user_ctx = UserCtx} = Db) -> case is_member(Db) of true -> ok; false -> throw_security_error(UserCtx) end. -is_admin(#db{user_ctx=UserCtx}=Db) -> +is_admin(#db{user_ctx = UserCtx} = Db) -> case couch_db_plugin:check_is_admin(Db) of - true -> true; + true -> + true; false -> {Admins} = get_admins(Db), is_authorized(UserCtx, Admins) end. -is_member(#db{user_ctx=UserCtx}=Db) -> +is_member(#db{user_ctx = UserCtx} = Db) -> case is_admin(Db) of - true -> true; + true -> + true; false -> case is_public_db(Db) of - true -> true; + true -> + true; false -> {Members} = get_members(Db), is_authorized(UserCtx, Members) end end. -is_public_db(#db{}=Db) -> +is_public_db(#db{} = Db) -> {Members} = get_members(Db), Names = couch_util:get_value(<<"names">>, Members, []), Roles = couch_util:get_value(<<"roles">>, Members, []), Names =:= [] andalso Roles =:= []. 
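The `get_design_doc*` clauses above use a middleman idiom for calling into `fabric` from shard-local code: run the call in a throwaway process, return its result as the exit reason, and harvest it from the monitor's `'DOWN'` message, so the caller is insulated from links and leftover state in the callee. A generic sketch (hypothetical module):

```erlang
-module(middleman_demo).
-export([call/1]).

call(Fun) when is_function(Fun, 0) ->
    {_Pid, Ref} = spawn_monitor(fun() -> exit(Fun()) end),
    receive
        {'DOWN', Ref, process, _FromPid, Result} -> Result
    end.
```

A crash inside `Fun` simply arrives as a different `Result` term, which is why the callers above match the response without a success wrapper.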
-is_authorized(#user_ctx{name=UserName,roles=UserRoles}, Security) -> +is_authorized(#user_ctx{name = UserName, roles = UserRoles}, Security) -> Names = couch_util:get_value(<<"names">>, Security, []), Roles = couch_util:get_value(<<"roles">>, Security, []), case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of @@ -720,36 +769,38 @@ check_security(names, null, _) -> check_security(names, UserName, Names) -> lists:member(UserName, Names). -throw_security_error(#user_ctx{name=null}=UserCtx) -> +throw_security_error(#user_ctx{name = null} = UserCtx) -> Reason = <<"You are not authorized to access this db.">>, throw_security_error(UserCtx, Reason); -throw_security_error(#user_ctx{name=_}=UserCtx) -> +throw_security_error(#user_ctx{name = _} = UserCtx) -> Reason = <<"You are not allowed to access this db.">>, throw_security_error(UserCtx, Reason). -throw_security_error(#user_ctx{}=UserCtx, Reason) -> +throw_security_error(#user_ctx{} = UserCtx, Reason) -> Error = security_error_type(UserCtx), throw({Error, Reason}). -security_error_type(#user_ctx{name=null}) -> +security_error_type(#user_ctx{name = null}) -> unauthorized; -security_error_type(#user_ctx{name=_}) -> +security_error_type(#user_ctx{name = _}) -> forbidden. - -get_admins(#db{security=SecProps}) -> +get_admins(#db{security = SecProps}) -> couch_util:get_value(<<"admins">>, SecProps, {[]}). -get_members(#db{security=SecProps}) -> +get_members(#db{security = SecProps}) -> % we fallback to readers here for backwards compatibility - couch_util:get_value(<<"members">>, SecProps, - couch_util:get_value(<<"readers">>, SecProps, {[]})). + couch_util:get_value( + <<"members">>, + SecProps, + couch_util:get_value(<<"readers">>, SecProps, {[]}) + ). -get_security(#db{security=SecProps}) -> +get_security(#db{security = SecProps}) -> {SecProps}; get_security(?OLD_DB_REC = Db) -> {?OLD_DB_SECURITY(Db)}. -set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) -> +set_security(#db{main_pid = Pid} = Db, {NewSecProps}) when is_list(NewSecProps) -> check_is_admin(Db), ok = validate_security_object(NewSecProps), gen_server:call(Pid, {set_security, NewSecProps}, infinity); @@ -762,8 +813,11 @@ set_user_ctx(#db{} = Db, UserCtx) -> validate_security_object(SecProps) -> Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}), % we fallback to readers here for backwards compatibility - Members = couch_util:get_value(<<"members">>, SecProps, - couch_util:get_value(<<"readers">>, SecProps, {[]})), + Members = couch_util:get_value( + <<"members">>, + SecProps, + couch_util:get_value(<<"readers">>, SecProps, {[]}) + ), ok = validate_names_and_roles(Admins), ok = validate_names_and_roles(Members), ok. 
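`is_authorized/2` above grants access on either a role hit (with `_admin` implicitly allowed) or a name hit. Ignoring the public-db shortcut that `is_member/1` handles separately, the core test reduces to this sketch (hypothetical module; `null` is the unauthenticated user name):

```erlang
-module(security_demo).
-export([is_authorized/4]).

is_authorized(UserName, UserRoles, Names, Roles) ->
    RoleMatch = lists:any(
        fun(Role) -> lists:member(Role, [<<"_admin">> | Roles]) end,
        UserRoles
    ),
    RoleMatch orelse
        (UserName =/= null andalso lists:member(UserName, Names)).
```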
@@ -771,18 +825,18 @@ validate_security_object(SecProps) -> % validate user input validate_names_and_roles({Props}) when is_list(Props) -> case couch_util:get_value(<<"names">>, Props, []) of - Ns when is_list(Ns) -> - [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)], + Ns when is_list(Ns) -> + [throw("names must be a JSON list of strings") || N <- Ns, not is_binary(N)], Ns; - _ -> - throw("names must be a JSON list of strings") + _ -> + throw("names must be a JSON list of strings") end, case couch_util:get_value(<<"roles">>, Props, []) of - Rs when is_list(Rs) -> - [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)], - Rs; - _ -> - throw("roles must be a JSON list of strings") + Rs when is_list(Rs) -> + [throw("roles must be a JSON list of strings") || R <- Rs, not is_binary(R)], + Rs; + _ -> + throw("roles must be a JSON list of strings") end, ok; validate_names_and_roles(_) -> @@ -791,18 +845,17 @@ validate_names_and_roles(_) -> get_revs_limit(#db{} = Db) -> couch_db_engine:get_revs_limit(Db). -set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 -> +set_revs_limit(#db{main_pid = Pid} = Db, Limit) when Limit > 0 -> check_is_admin(Db), gen_server:call(Pid, {set_revs_limit, Limit}, infinity); set_revs_limit(_Db, _Limit) -> throw(invalid_revs_limit). -name(#db{name=Name}) -> +name(#db{name = Name}) -> Name; name(?OLD_DB_REC = Db) -> ?OLD_DB_NAME(Db). - validate_docid(#db{} = Db, DocId) when is_binary(DocId) -> couch_doc:validate_docid(DocId, name(Db)), case is_partitioned(Db) of @@ -812,7 +865,6 @@ validate_docid(#db{} = Db, DocId) when is_binary(DocId) -> ok end. - doc_from_json_obj_validate(#db{} = Db, DocJson) -> Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)), {Props} = DocJson, @@ -825,22 +877,21 @@ doc_from_json_obj_validate(#db{} = Db, DocJson) -> end, Doc. - update_doc(Db, Doc, Options) -> update_doc(Db, Doc, Options, interactive_edit). update_doc(Db, Doc, Options, UpdateType) -> case update_docs(Db, [Doc], Options, UpdateType) of - {ok, [{ok, NewRev}]} -> - {ok, NewRev}; - {ok, [{{_Id, _Rev}, Error}]} -> - throw(Error); - {ok, [Error]} -> - throw(Error); - {ok, []} -> - % replication success - {Pos, [RevId | _]} = Doc#doc.revs, - {ok, {Pos, RevId}} + {ok, [{ok, NewRev}]} -> + {ok, NewRev}; + {ok, [{{_Id, _Rev}, Error}]} -> + throw(Error); + {ok, [Error]} -> + throw(Error); + {ok, []} -> + % replication success + {Pos, [RevId | _]} = Doc#doc.revs, + {ok, {Pos, RevId}} end. update_docs(Db, Docs) -> @@ -860,30 +911,30 @@ group_alike_docs(Docs) -> group_alike_docs([], Buckets) -> lists:reverse(lists:map(fun lists:reverse/1, Buckets)); -group_alike_docs([Doc|Rest], []) -> +group_alike_docs([Doc | Rest], []) -> group_alike_docs(Rest, [[Doc]]); -group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) -> - [#doc{id=BucketId}|_] = Bucket, +group_alike_docs([Doc | Rest], [Bucket | RestBuckets]) -> + [#doc{id = BucketId} | _] = Bucket, case Doc#doc.id == BucketId of - true -> - % add to existing bucket - group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]); - false -> - % add to new bucket - group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]]) + true -> + % add to existing bucket + group_alike_docs(Rest, [[Doc | Bucket] | RestBuckets]); + false -> + % add to new bucket + group_alike_docs(Rest, [[Doc] | [Bucket | RestBuckets]]) end. 
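`group_alike_docs/2` above buckets consecutive docs that share an id, preserving order inside each bucket; callers rely on this to apply edits against one rev tree at a time. The same shape with plain `{Id, Payload}` tuples standing in for `#doc{}` records (hypothetical module):

```erlang
-module(bucket_demo).
-export([group/1]).

group(Docs) -> group(Docs, []).

group([], Buckets) ->
    lists:reverse([lists:reverse(B) || B <- Buckets]);
%% Same id as the current bucket head: add to the existing bucket.
group([{Id, _} = Doc | Rest], [[{Id, _} | _] = Bucket | Buckets]) ->
    group(Rest, [[Doc | Bucket] | Buckets]);
%% Different id (or no buckets yet): start a new bucket.
group([Doc | Rest], Buckets) ->
    group(Rest, [[Doc] | Buckets]).
```

`group([{a, 1}, {a, 2}, {b, 1}])` yields `[[{a, 1}, {a, 2}], [{b, 1}]]`.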
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) -> +validate_doc_update(#db{} = Db, #doc{id = <<"_design/", _/binary>>} = Doc, _GetDiskDocFun) -> case catch check_is_admin(Db) of ok -> validate_ddoc(Db, Doc); Error -> Error end; validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) -> ValidationFuns = load_validation_funs(Db), - validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun); -validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) -> + validate_doc_update(Db#db{validate_doc_funs = ValidationFuns}, Doc, Fun); +validate_doc_update(#db{validate_doc_funs = []}, _Doc, _GetDiskDocFun) -> ok; -validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) -> +validate_doc_update(_Db, #doc{id = <<"_local/", _/binary>>}, _GetDiskDocFun) -> ok; validate_doc_update(Db, Doc, GetDiskDocFun) -> case get(io_priority) of @@ -911,22 +962,26 @@ validate_doc_update_int(Db, Doc, GetDiskDocFun) -> JsonCtx = couch_util:json_user_ctx(Db), SecObj = get_security(Db), try - [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of - ok -> ok; - Error -> throw(Error) - end || Fun <- Db#db.validate_doc_funs], + [ + case Fun(Doc, DiskDoc, JsonCtx, SecObj) of + ok -> ok; + Error -> throw(Error) + end + || Fun <- Db#db.validate_doc_funs + ], ok catch throw:Error -> Error end end, - couch_stats:update_histogram([couchdb, query_server, vdu_process_time], - Fun). - + couch_stats:update_histogram( + [couchdb, query_server, vdu_process_time], + Fun + ). % to be safe, spawn a middleman here -load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) -> +load_validation_funs(#db{main_pid = Pid, name = <<"shards/", _/binary>>} = Db) -> {_, Ref} = spawn_monitor(fun() -> exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs)) end), @@ -941,242 +996,326 @@ load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) -> couch_log:error("could not load validation funs ~p", [Reason]), throw(internal_server_error) end; -load_validation_funs(#db{main_pid=Pid}=Db) -> +load_validation_funs(#db{main_pid = Pid} = Db) -> {ok, DDocInfos} = get_design_docs(Db), - OpenDocs = fun - (#full_doc_info{}=D) -> - {ok, Doc} = open_doc_int(Db, D, [ejson_body]), - Doc + OpenDocs = fun(#full_doc_info{} = D) -> + {ok, Doc} = open_doc_int(Db, D, [ejson_body]), + Doc end, DDocs = lists:map(OpenDocs, DDocInfos), - Funs = lists:flatmap(fun(DDoc) -> - case couch_doc:get_validate_doc_fun(DDoc) of - nil -> []; - Fun -> [Fun] - end - end, DDocs), + Funs = lists:flatmap( + fun(DDoc) -> + case couch_doc:get_validate_doc_fun(DDoc) of + nil -> []; + Fun -> [Fun] + end + end, + DDocs + ), gen_server:cast(Pid, {load_validation_funs, Funs}), Funs. reload_validation_funs(#db{} = Db) -> gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}). 
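`load_validation_funs/1` above opens every design doc and keeps only those defining a `validate_doc_update` function, then caches the list in the db updater. The flatmap pattern in isolation, with maps standing in for design docs (hypothetical module):

```erlang
-module(vdu_demo).
-export([collect/1]).

collect(DDocs) ->
    lists:flatmap(
        fun(DDoc) ->
            case maps:get(validate_doc_update, DDoc, nil) of
                nil -> [];
                Fun -> [Fun]
            end
        end,
        DDocs
    ).
```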
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc, - OldFullDocInfo, LeafRevsDict, AllowConflict) -> +prep_and_validate_update( + Db, + #doc{id = Id, revs = {RevStart, Revs}} = Doc, + OldFullDocInfo, + LeafRevsDict, + AllowConflict +) -> case Revs of - [PrevRev|_] -> - case dict:find({RevStart, PrevRev}, LeafRevsDict) of - {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} -> - case couch_doc:has_stubs(Doc) of - true -> - DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs), - Doc2 = couch_doc:merge_stubs(Doc, DiskDoc), - {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2}; - false -> - LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end, - {validate_doc_update(Db, Doc, LoadDiskDoc), Doc} + [PrevRev | _] -> + case dict:find({RevStart, PrevRev}, LeafRevsDict) of + {ok, {#leaf{deleted = Deleted, ptr = DiskSp}, DiskRevs}} -> + case couch_doc:has_stubs(Doc) of + true -> + DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs), + Doc2 = couch_doc:merge_stubs(Doc, DiskDoc), + {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2}; + false -> + LoadDiskDoc = fun() -> make_doc(Db, Id, Deleted, DiskSp, DiskRevs) end, + {validate_doc_update(Db, Doc, LoadDiskDoc), Doc} + end; + error when AllowConflict -> + % will generate error if + couch_doc:merge_stubs(Doc, #doc{}), + % there are stubs + {validate_doc_update(Db, Doc, fun() -> nil end), Doc}; + error -> + {conflict, Doc} end; - error when AllowConflict -> - couch_doc:merge_stubs(Doc, #doc{}), % will generate error if - % there are stubs - {validate_doc_update(Db, Doc, fun() -> nil end), Doc}; - error -> - {conflict, Doc} - end; - [] -> - % new doc, and we have existing revs. - % reuse existing deleted doc - if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict -> - {validate_doc_update(Db, Doc, fun() -> nil end), Doc}; - true -> - {conflict, Doc} - end + [] -> + % new doc, and we have existing revs. + % reuse existing deleted doc + if + OldFullDocInfo#full_doc_info.deleted orelse AllowConflict -> + {validate_doc_update(Db, Doc, fun() -> nil end), Doc}; + true -> + {conflict, Doc} + end end. 
- - -prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped, - AccFatalErrors) -> +prep_and_validate_updates( + _Db, + [], + [], + _AllowConflict, + AccPrepped, + AccFatalErrors +) -> AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)), {AccPrepped2, AccFatalErrors}; -prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups], - AllowConflict, AccPrepped, AccErrors) -> +prep_and_validate_updates( + Db, + [DocBucket | RestBuckets], + [not_found | RestLookups], + AllowConflict, + AccPrepped, + AccErrors +) -> % no existing revs are known, {PreppedBucket, AccErrors3} = lists:foldl( - fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) -> + fun(#doc{revs = Revs} = Doc, {AccBucket, AccErrors2}) -> case couch_doc:has_stubs(Doc) of - true -> - couch_doc:merge_stubs(Doc, #doc{}); % will throw exception - false -> ok + true -> + % will throw exception + couch_doc:merge_stubs(Doc, #doc{}); + false -> + ok end, case Revs of - {0, []} -> - case validate_doc_update(Db, Doc, fun() -> nil end) of - ok -> - {[Doc | AccBucket], AccErrors2}; - Error -> - {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]} - end; - _ -> - % old revs specified but none exist, a conflict - {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]} + {0, []} -> + case validate_doc_update(Db, Doc, fun() -> nil end) of + ok -> + {[Doc | AccBucket], AccErrors2}; + Error -> + {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]} + end; + _ -> + % old revs specified but none exist, a conflict + {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]} end end, - {[], AccErrors}, DocBucket), + {[], AccErrors}, + DocBucket + ), - prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict, - [PreppedBucket | AccPrepped], AccErrors3); -prep_and_validate_updates(Db, [DocBucket|RestBuckets], - [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups], - AllowConflict, AccPrepped, AccErrors) -> + prep_and_validate_updates( + Db, + RestBuckets, + RestLookups, + AllowConflict, + [PreppedBucket | AccPrepped], + AccErrors3 + ); +prep_and_validate_updates( + Db, + [DocBucket | RestBuckets], + [#full_doc_info{rev_tree = OldRevTree} = OldFullDocInfo | RestLookups], + AllowConflict, + AccPrepped, + AccErrors +) -> Leafs = couch_key_tree:get_all_leafs(OldRevTree), LeafRevsDict = dict:from_list([ - {{Start, RevId}, {Leaf, Revs}} || - {Leaf, {Start, [RevId | _]} = Revs} <- Leafs + {{Start, RevId}, {Leaf, Revs}} + || {Leaf, {Start, [RevId | _]} = Revs} <- Leafs ]), {PreppedBucket, AccErrors3} = lists:foldl( fun(Doc, {Docs2Acc, AccErrors2}) -> - case prep_and_validate_update(Db, Doc, OldFullDocInfo, - LeafRevsDict, AllowConflict) of - {ok, Doc2} -> - {[Doc2 | Docs2Acc], AccErrors2}; - {Error, _} -> - % Record the error - {Docs2Acc, [{doc_tag(Doc), Error} |AccErrors2]} + case + prep_and_validate_update( + Db, + Doc, + OldFullDocInfo, + LeafRevsDict, + AllowConflict + ) + of + {ok, Doc2} -> + {[Doc2 | Docs2Acc], AccErrors2}; + {Error, _} -> + % Record the error + {Docs2Acc, [{doc_tag(Doc), Error} | AccErrors2]} end end, - {[], AccErrors}, DocBucket), - prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict, - [PreppedBucket | AccPrepped], AccErrors3). - + {[], AccErrors}, + DocBucket + ), + prep_and_validate_updates( + Db, + RestBuckets, + RestLookups, + AllowConflict, + [PreppedBucket | AccPrepped], + AccErrors3 + ). update_docs(Db, Docs, Options) -> update_docs(Db, Docs, Options, interactive_edit). 
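`prep_and_validate_updates/6` above indexes the existing leaf revisions by `{Pos, RevId}` so each incoming edit can locate its parent with a dict lookup instead of another tree walk. The index construction on its own (hypothetical module; `Leafs` has the `couch_key_tree:get_all_leafs/1` shape):

```erlang
-module(leaf_dict_demo).
-export([build/1, parent_exists/2]).

build(Leafs) ->
    dict:from_list([
        {{Start, RevId}, {Leaf, Revs}}
     || {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
    ]).

parent_exists({Pos, RevId}, LeafRevsDict) ->
    dict:is_key({Pos, RevId}, LeafRevsDict).
```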
- prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) -> - Errors2 = [{{Id, {Pos, Rev}}, Error} || - {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors], + Errors2 = [ + {{Id, {Pos, Rev}}, Error} + || {#doc{id = Id, revs = {Pos, [Rev | _]}}, Error} <- AccErrors + ], AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)), {AccPrepped2, lists:reverse(Errors2)}; -prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) -> +prep_and_validate_replicated_updates( + Db, [Bucket | RestBuckets], [OldInfo | RestOldInfo], AccPrepped, AccErrors +) -> case OldInfo of - not_found -> - {ValidatedBucket, AccErrors3} = lists:foldl( - fun(Doc, {AccPrepped2, AccErrors2}) -> - case couch_doc:has_stubs(Doc) of - true -> - couch_doc:merge_stubs(Doc, #doc{}); % will throw exception - false -> ok - end, - case validate_doc_update(Db, Doc, fun() -> nil end) of - ok -> - {[Doc | AccPrepped2], AccErrors2}; - Error -> - {AccPrepped2, [{Doc, Error} | AccErrors2]} - end - end, - {[], AccErrors}, Bucket), - prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3); - #full_doc_info{rev_tree=OldTree} -> - OldLeafs = couch_key_tree:get_all_leafs_full(OldTree), - OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs], - NewPaths = lists:map(fun couch_doc:to_path/1, Bucket), - NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths), - Leafs = couch_key_tree:get_all_leafs_full(NewRevTree), - LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]), - {ValidatedBucket, AccErrors3} = - lists:foldl( - fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) -> - IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU), - case dict:find({Pos, RevId}, LeafRevsFullDict) of - {ok, {Start, Path}} when not IsOldLeaf -> - % our unflushed doc is a leaf node. Go back on the path - % to find the previous rev that's on disk. - - LoadPrevRevFun = fun() -> - make_first_doc_on_disk(Db,Id,Start-1, tl(Path)) - end, - + not_found -> + {ValidatedBucket, AccErrors3} = lists:foldl( + fun(Doc, {AccPrepped2, AccErrors2}) -> case couch_doc:has_stubs(Doc) of - true -> - DiskDoc = case LoadPrevRevFun() of - #doc{} = DiskDoc0 -> - DiskDoc0; + true -> + % will throw exception + couch_doc:merge_stubs(Doc, #doc{}); + false -> + ok + end, + case validate_doc_update(Db, Doc, fun() -> nil end) of + ok -> + {[Doc | AccPrepped2], AccErrors2}; + Error -> + {AccPrepped2, [{Doc, Error} | AccErrors2]} + end + end, + {[], AccErrors}, + Bucket + ), + prep_and_validate_replicated_updates( + Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3 + ); + #full_doc_info{rev_tree = OldTree} -> + OldLeafs = couch_key_tree:get_all_leafs_full(OldTree), + OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _} | _]} <- OldLeafs], + NewPaths = lists:map(fun couch_doc:to_path/1, Bucket), + NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths), + Leafs = couch_key_tree:get_all_leafs_full(NewRevTree), + LeafRevsFullDict = dict:from_list([ + {{Start, RevId}, FullPath} + || {Start, [{RevId, _} | _]} = FullPath <- Leafs + ]), + {ValidatedBucket, AccErrors3} = + lists:foldl( + fun(#doc{id = Id, revs = {Pos, [RevId | _]}} = Doc, {AccValidated, AccErrors2}) -> + IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU), + case dict:find({Pos, RevId}, LeafRevsFullDict) of + {ok, {Start, Path}} when not IsOldLeaf -> + % our unflushed doc is a leaf node. 
Go back on the path + % to find the previous rev that's on disk. + + LoadPrevRevFun = fun() -> + make_first_doc_on_disk(Db, Id, Start - 1, tl(Path)) + end, + + case couch_doc:has_stubs(Doc) of + true -> + DiskDoc = + case LoadPrevRevFun() of + #doc{} = DiskDoc0 -> + DiskDoc0; + _ -> + % Force a missing_stub exception + couch_doc:merge_stubs(Doc, #doc{}) + end, + Doc2 = couch_doc:merge_stubs(Doc, DiskDoc), + GetDiskDocFun = fun() -> DiskDoc end; + false -> + Doc2 = Doc, + GetDiskDocFun = LoadPrevRevFun + end, + + case validate_doc_update(Db, Doc2, GetDiskDocFun) of + ok -> + {[Doc2 | AccValidated], AccErrors2}; + Error -> + {AccValidated, [{Doc, Error} | AccErrors2]} + end; _ -> - % Force a missing_stub exception - couch_doc:merge_stubs(Doc, #doc{}) - end, - Doc2 = couch_doc:merge_stubs(Doc, DiskDoc), - GetDiskDocFun = fun() -> DiskDoc end; - false -> - Doc2 = Doc, - GetDiskDocFun = LoadPrevRevFun + % this doc isn't a leaf or already exists in the tree. + % ignore but consider it a success. + {AccValidated, AccErrors2} + end end, - - case validate_doc_update(Db, Doc2, GetDiskDocFun) of - ok -> - {[Doc2 | AccValidated], AccErrors2}; - Error -> - {AccValidated, [{Doc, Error} | AccErrors2]} - end; - _ -> - % this doc isn't a leaf or already exists in the tree. - % ignore but consider it a success. - {AccValidated, AccErrors2} - end - end, - {[], AccErrors}, Bucket), - prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, - [ValidatedBucket | AccPrepped], AccErrors3) + {[], AccErrors}, + Bucket + ), + prep_and_validate_replicated_updates( + Db, + RestBuckets, + RestOldInfo, + [ValidatedBucket | AccPrepped], + AccErrors3 + ) end. - - -new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) -> - DigestedAtts = lists:foldl(fun(Att, Acc) -> - [N, T, M] = couch_att:fetch([name, type, md5], Att), - case M == <<>> of - true -> Acc; - false -> [{N, T, M} | Acc] - end - end, [], Atts), +new_revid(#doc{body = Body, revs = {OldStart, OldRevs}, atts = Atts, deleted = Deleted}) -> + DigestedAtts = lists:foldl( + fun(Att, Acc) -> + [N, T, M] = couch_att:fetch([name, type, md5], Att), + case M == <<>> of + true -> Acc; + false -> [{N, T, M} | Acc] + end + end, + [], + Atts + ), case DigestedAtts of Atts2 when length(Atts) =/= length(Atts2) -> % We must have old style non-md5 attachments ?l2b(integer_to_list(couch_util:rand32())); Atts2 -> - OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end, - couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}])) + OldRev = + case OldRevs of + [] -> 0; + [OldRev0 | _] -> OldRev0 + end, + couch_hash:md5_hash( + term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]) + ) end. new_revs([], OutBuckets, IdRevsAcc) -> {lists:reverse(OutBuckets), IdRevsAcc}; -new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) -> +new_revs([Bucket | RestBuckets], OutBuckets, IdRevsAcc) -> {NewBucket, IdRevsAcc3} = lists:mapfoldl( - fun(#doc{revs={Start, RevIds}}=Doc, IdRevsAcc2)-> - NewRevId = new_revid(Doc), - {Doc#doc{revs={Start+1, [NewRevId | RevIds]}}, - [{doc_tag(Doc), {ok, {Start+1, NewRevId}}} | IdRevsAcc2]} - end, IdRevsAcc, Bucket), - new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3). 
- -check_dup_atts(#doc{atts=Atts}=Doc) -> - lists:foldl(fun(Att, Names) -> - Name = couch_att:fetch(name, Att), - case ordsets:is_element(Name, Names) of - true -> throw({bad_request, <<"Duplicate attachments">>}); - false -> ordsets:add_element(Name, Names) - end - end, ordsets:new(), Atts), + fun(#doc{revs = {Start, RevIds}} = Doc, IdRevsAcc2) -> + NewRevId = new_revid(Doc), + {Doc#doc{revs = {Start + 1, [NewRevId | RevIds]}}, [ + {doc_tag(Doc), {ok, {Start + 1, NewRevId}}} | IdRevsAcc2 + ]} + end, + IdRevsAcc, + Bucket + ), + new_revs(RestBuckets, [NewBucket | OutBuckets], IdRevsAcc3). + +check_dup_atts(#doc{atts = Atts} = Doc) -> + lists:foldl( + fun(Att, Names) -> + Name = couch_att:fetch(name, Att), + case ordsets:is_element(Name, Names) of + true -> throw({bad_request, <<"Duplicate attachments">>}); + false -> ordsets:add_element(Name, Names) + end + end, + ordsets:new(), + Atts + ), Doc. tag_docs([]) -> []; -tag_docs([#doc{meta=Meta}=Doc | Rest]) -> - [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)]. +tag_docs([#doc{meta = Meta} = Doc | Rest]) -> + [Doc#doc{meta = [{ref, make_ref()} | Meta]} | tag_docs(Rest)]. -doc_tag(#doc{meta=Meta}) -> +doc_tag(#doc{meta = Meta}) -> case lists:keyfind(ref, 1, Meta) of {ref, Ref} when is_reference(Ref) -> Ref; false -> throw(doc_not_tagged); @@ -1187,58 +1326,105 @@ update_docs(Db, Docs0, Options, replicated_changes) -> Docs = tag_docs(Docs0), PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) -> - prep_and_validate_replicated_updates(Db0, DocBuckets0, - ExistingDocInfos, [], []) + prep_and_validate_replicated_updates( + Db0, + DocBuckets0, + ExistingDocInfos, + [], + [] + ) end, - {ok, DocBuckets, NonRepDocs, DocErrors} - = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes), + {ok, DocBuckets, NonRepDocs, DocErrors} = + before_docs_update(Db, Docs, PrepValidateFun, replicated_changes), - DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc)) - || Doc <- Bucket] || Bucket <- DocBuckets], - {ok, _} = write_and_commit(Db, DocBuckets2, - NonRepDocs, [merge_conflicts | Options]), + DocBuckets2 = [ + [ + doc_flush_atts(Db, check_dup_atts(Doc)) + || Doc <- Bucket + ] + || Bucket <- DocBuckets + ], + {ok, _} = write_and_commit( + Db, + DocBuckets2, + NonRepDocs, + [merge_conflicts | Options] + ), {ok, DocErrors}; - update_docs(Db, Docs0, Options, interactive_edit) -> Docs = tag_docs(Docs0), AllOrNothing = lists:member(all_or_nothing, Options), PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) -> - prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos, - AllOrNothing, [], []) + prep_and_validate_updates( + Db0, + DocBuckets0, + ExistingDocInfos, + AllOrNothing, + [], + [] + ) end, - {ok, DocBuckets, NonRepDocs, DocErrors} - = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit), - - if (AllOrNothing) and (DocErrors /= []) -> - RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]), - {aborted, lists:map(fun({Ref, Error}) -> - #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict), - case {Start, RevIds} of - {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error}; - {0, []} -> {{Id, {0, <<>>}}, Error} - end - end, DocErrors)}; - true -> - Options2 = if AllOrNothing -> [merge_conflicts]; - true -> [] end ++ Options, - DocBuckets2 = [[ - doc_flush_atts(Db, set_new_att_revpos( - check_dup_atts(Doc))) - || Doc <- B] || B <- DocBuckets], - {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []), - - {ok, CommitResults} = write_and_commit(Db, DocBuckets3, - NonRepDocs, Options2), - - 
ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) -> - dict:store(Key, Resp, ResultsAcc) - end, dict:from_list(IdRevs), CommitResults ++ DocErrors), - {ok, lists:map(fun(Doc) -> - dict:fetch(doc_tag(Doc), ResultsDict) - end, Docs)} + {ok, DocBuckets, NonRepDocs, DocErrors} = + before_docs_update(Db, Docs, PrepValidateFun, interactive_edit), + + if + (AllOrNothing) and (DocErrors /= []) -> + RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]), + {aborted, + lists:map( + fun({Ref, Error}) -> + #doc{id = Id, revs = {Start, RevIds}} = dict:fetch(Ref, RefErrorDict), + case {Start, RevIds} of + {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error}; + {0, []} -> {{Id, {0, <<>>}}, Error} + end + end, + DocErrors + )}; + true -> + Options2 = + if + AllOrNothing -> [merge_conflicts]; + true -> [] + end ++ Options, + DocBuckets2 = [ + [ + doc_flush_atts( + Db, + set_new_att_revpos( + check_dup_atts(Doc) + ) + ) + || Doc <- B + ] + || B <- DocBuckets + ], + {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []), + + {ok, CommitResults} = write_and_commit( + Db, + DocBuckets3, + NonRepDocs, + Options2 + ), + + ResultsDict = lists:foldl( + fun({Key, Resp}, ResultsAcc) -> + dict:store(Key, Resp, ResultsAcc) + end, + dict:from_list(IdRevs), + CommitResults ++ DocErrors + ), + {ok, + lists:map( + fun(Doc) -> + dict:fetch(doc_tag(Doc), ResultsDict) + end, + Docs + )} end. % Returns the first available document on disk. Input list is a full rev path @@ -1246,10 +1432,10 @@ update_docs(Db, Docs0, Options, interactive_edit) -> make_first_doc_on_disk(_Db, _Id, _Pos, []) -> nil; make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) -> - make_first_doc_on_disk(Db, Id, Pos-1, RestPath); -make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) -> make_first_doc_on_disk(Db, Id, Pos - 1, RestPath); -make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) -> +make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING} | RestPath]) -> + make_first_doc_on_disk(Db, Id, Pos - 1, RestPath); +make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted = IsDel, ptr = Sp}} | _] = DocPath) -> Revs = [Rev || {Rev, _} <- DocPath], make_doc(Db, Id, IsDel, Sp, {Pos, Revs}). @@ -1267,90 +1453,105 @@ collect_results_with_metrics(Pid, MRef, []) -> collect_results(Pid, MRef, ResultsAcc) -> receive - {result, Pid, Result} -> - collect_results(Pid, MRef, [Result | ResultsAcc]); - {done, Pid} -> - {ok, ResultsAcc}; - {retry, Pid} -> - retry; - {'DOWN', MRef, _, _, Reason} -> - exit(Reason) + {result, Pid, Result} -> + collect_results(Pid, MRef, [Result | ResultsAcc]); + {done, Pid} -> + {ok, ResultsAcc}; + {retry, Pid} -> + retry; + {'DOWN', MRef, _, _, Reason} -> + exit(Reason) end. -write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1, - NonRepDocs, Options) -> +write_and_commit( + #db{main_pid = Pid, user_ctx = Ctx} = Db, + DocBuckets1, + NonRepDocs, + Options +) -> DocBuckets = prepare_doc_summaries(Db, DocBuckets1), MergeConflicts = lists:member(merge_conflicts, Options), MRef = erlang:monitor(process, Pid), try Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts}, case collect_results_with_metrics(Pid, MRef, []) of - {ok, Results} -> {ok, Results}; - retry -> - % This can happen if the db file we wrote to was swapped out by - % compaction. 
Retry by reopening the db and writing to the current file - {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]), - DocBuckets2 = [ - [doc_flush_atts(Db2, Doc) || Doc <- Bucket] || - Bucket <- DocBuckets1 - ], - % We only retry once - DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2), - close(Db2), - Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts}, - case collect_results_with_metrics(Pid, MRef, []) of - {ok, Results} -> {ok, Results}; - retry -> throw({update_error, compaction_retry}) - end + {ok, Results} -> + {ok, Results}; + retry -> + % This can happen if the db file we wrote to was swapped out by + % compaction. Retry by reopening the db and writing to the current file + {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]), + DocBuckets2 = [ + [doc_flush_atts(Db2, Doc) || Doc <- Bucket] + || Bucket <- DocBuckets1 + ], + % We only retry once + DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2), + close(Db2), + Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts}, + case collect_results_with_metrics(Pid, MRef, []) of + {ok, Results} -> {ok, Results}; + retry -> throw({update_error, compaction_retry}) + end end after erlang:demonitor(MRef, [flush]) end. - prepare_doc_summaries(Db, BucketList) -> - [lists:map( - fun(#doc{body = Body, atts = Atts} = Doc0) -> - DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts], - {ok, SizeInfo} = couch_att:size_info(Atts), - AttsStream = case Atts of - [Att | _] -> - {stream, StreamEngine} = couch_att:fetch(data, Att), - StreamEngine; - [] -> - nil + [ + lists:map( + fun(#doc{body = Body, atts = Atts} = Doc0) -> + DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts], + {ok, SizeInfo} = couch_att:size_info(Atts), + AttsStream = + case Atts of + [Att | _] -> + {stream, StreamEngine} = couch_att:fetch(data, Att), + StreamEngine; + [] -> + nil + end, + Doc1 = Doc0#doc{ + atts = DiskAtts, + meta = + [ + {size_info, SizeInfo}, + {atts_stream, AttsStream}, + {ejson_size, couch_ejson_size:encoded_size(Body)} + ] ++ Doc0#doc.meta + }, + couch_db_engine:serialize_doc(Db, Doc1) end, - Doc1 = Doc0#doc{ - atts = DiskAtts, - meta = [ - {size_info, SizeInfo}, - {atts_stream, AttsStream}, - {ejson_size, couch_ejson_size:encoded_size(Body)} - ] ++ Doc0#doc.meta - }, - couch_db_engine:serialize_doc(Db, Doc1) - end, - Bucket) || Bucket <- BucketList]. - + Bucket + ) + || Bucket <- BucketList + ]. 
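`write_and_commit/4` above retries exactly once when the updater answers `retry`, which happens if compaction swapped the underlying file between attachment flush and commit; before retrying, the real code reopens the db and re-flushes attachments against the new file. The control flow alone (hypothetical module; `WriteFun` stands in for the send-and-collect round trip):

```erlang
-module(retry_once_demo).
-export([write/1]).

write(WriteFun) when is_function(WriteFun, 0) ->
    case WriteFun() of
        {ok, Results} ->
            {ok, Results};
        retry ->
            %% We only retry once; a second swap is treated as fatal.
            case WriteFun() of
                {ok, Results} -> {ok, Results};
                retry -> throw({update_error, compaction_retry})
            end
    end.
```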
before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) -> increment_stat(Db, [couchdb, database_writes]), % Separate _local docs from normal docs IsLocal = fun - (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true; + (#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true; (_) -> false end, {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs), BucketList = group_alike_docs(Docs2), - DocBuckets = lists:map(fun(Bucket) -> - lists:map(fun(Doc) -> - DocWithBody = couch_doc:with_ejson_body(Doc), - couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType) - end, Bucket) - end, BucketList), + DocBuckets = lists:map( + fun(Bucket) -> + lists:map( + fun(Doc) -> + DocWithBody = couch_doc:with_ejson_body(Doc), + couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType) + end, + Bucket + ) + end, + BucketList + ), ValidatePred = fun (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true; @@ -1363,15 +1564,14 @@ before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType Ids = [Id || [#doc{id = Id} | _] <- DocBuckets], ExistingDocs = get_full_doc_infos(Db, Ids), {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs), - % remove empty buckets + % remove empty buckets DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []], {ok, DocBuckets3, NonRepDocs, DocErrors}; false -> {ok, DocBuckets, NonRepDocs, []} end. - -set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) -> +set_new_att_revpos(#doc{revs = {RevPos, _Revs}, atts = Atts0} = Doc) -> Atts = lists:map( fun(Att) -> case couch_att:fetch(data, Att) of @@ -1379,29 +1579,36 @@ set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) -> {stream, _} -> Att; {Fd, _} when is_pid(Fd) -> Att; % write required so update RevPos - _ -> couch_att:store(revpos, RevPos+1, Att) + _ -> couch_att:store(revpos, RevPos + 1, Att) end - end, Atts0), + end, + Atts0 + ), Doc#doc{atts = Atts}. - doc_flush_atts(Db, Doc) -> - Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}. - + Doc#doc{atts = [couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}. compressible_att_type(MimeType) when is_binary(MimeType) -> compressible_att_type(?b2l(MimeType)); compressible_att_type(MimeType) -> TypeExpList = re:split( - config:get("attachments", "compressible_types", - ?DEFAULT_COMPRESSIBLE_TYPES), + config:get( + "attachments", + "compressible_types", + ?DEFAULT_COMPRESSIBLE_TYPES + ), "\\s*,\\s*", [{return, list}] ), lists:any( fun(TypeExp) -> - Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"), - "(?:\\s*;.*?)?\\s*", $$], + Regexp = [ + "^\\s*", + re:replace(TypeExp, "\\*", ".*"), + "(?:\\s*;.*?)?\\s*", + $$ + ], re:run(MimeType, Regexp, [caseless]) =/= nomatch end, [T || T <- TypeExpList, T /= []] @@ -1419,74 +1626,80 @@ compressible_att_type(MimeType) -> % pretend that no Content-MD5 exists. 
with_stream(Db, Att, Fun) -> [InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att), - BufferSize = config:get_integer("couchdb", - "attachment_stream_buffer_size", 4096), - Options = case (Enc =:= identity) andalso compressible_att_type(Type) of - true -> - CompLevel = config:get_integer( - "attachments", "compression_level", 8), - [ - {buffer_size, BufferSize}, - {encoding, gzip}, - {compression_level, CompLevel} - ]; - _ -> - [{buffer_size, BufferSize}] - end, + BufferSize = config:get_integer( + "couchdb", + "attachment_stream_buffer_size", + 4096 + ), + Options = + case (Enc =:= identity) andalso compressible_att_type(Type) of + true -> + CompLevel = config:get_integer( + "attachments", "compression_level", 8 + ), + [ + {buffer_size, BufferSize}, + {encoding, gzip}, + {compression_level, CompLevel} + ]; + _ -> + [{buffer_size, BufferSize}] + end, {ok, OutputStream} = open_write_stream(Db, Options), - ReqMd5 = case Fun(OutputStream) of - {md5, FooterMd5} -> - case InMd5 of - md5_in_footer -> FooterMd5; - _ -> InMd5 - end; - _ -> - InMd5 - end, + ReqMd5 = + case Fun(OutputStream) of + {md5, FooterMd5} -> + case InMd5 of + md5_in_footer -> FooterMd5; + _ -> InMd5 + end; + _ -> + InMd5 + end, {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} = couch_stream:close(OutputStream), couch_util:check_md5(IdentityMd5, ReqMd5), - {AttLen, DiskLen, NewEnc} = case Enc of - identity -> - case {Md5, IdentityMd5} of - {Same, Same} -> - {Len, IdentityLen, identity}; - _ -> - {Len, IdentityLen, gzip} - end; - gzip -> - case couch_att:fetch([att_len, disk_len], Att) of - [AL, DL] when AL =:= undefined orelse DL =:= undefined -> - % Compressed attachment uploaded through the standalone API. - {Len, Len, gzip}; - [AL, DL] -> - % This case is used for efficient push-replication, where a - % compressed attachment is located in the body of multipart - % content-type request. - {AL, DL, gzip} - end - end, - couch_att:store([ - {data, {stream, StreamEngine}}, - {att_len, AttLen}, - {disk_len, DiskLen}, - {md5, Md5}, - {encoding, NewEnc} - ], Att). - + {AttLen, DiskLen, NewEnc} = + case Enc of + identity -> + case {Md5, IdentityMd5} of + {Same, Same} -> + {Len, IdentityLen, identity}; + _ -> + {Len, IdentityLen, gzip} + end; + gzip -> + case couch_att:fetch([att_len, disk_len], Att) of + [AL, DL] when AL =:= undefined orelse DL =:= undefined -> + % Compressed attachment uploaded through the standalone API. + {Len, Len, gzip}; + [AL, DL] -> + % This case is used for efficient push-replication, where a + % compressed attachment is located in the body of multipart + % content-type request. + {AL, DL, gzip} + end + end, + couch_att:store( + [ + {data, {stream, StreamEngine}}, + {att_len, AttLen}, + {disk_len, DiskLen}, + {md5, Md5}, + {encoding, NewEnc} + ], + Att + ). open_write_stream(Db, Options) -> couch_db_engine:open_write_stream(Db, Options). - open_read_stream(Db, AttState) -> couch_db_engine:open_read_stream(Db, AttState). - is_active_stream(Db, StreamEngine) -> couch_db_engine:is_active_stream(Db, StreamEngine). 
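The encoding decision at the end of `with_stream/3` above hinges on comparing the stream's raw and identity digests: if they agree, nothing was gzipped on the way to disk. As a bare function (hypothetical module; arguments are the upload encoding and the two digests from `couch_stream:close/1`):

```erlang
-module(att_enc_demo).
-export([final_encoding/3]).

%% Identity upload whose digests agree was stored as-is.
final_encoding(identity, Md5, Md5) -> identity;
%% Identity upload with differing digests was gzipped by the stream layer.
final_encoding(identity, _Md5, _IdentityMd5) -> gzip;
%% Already-gzipped uploads keep their encoding.
final_encoding(gzip, _Md5, _IdentityMd5) -> gzip.
```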
- calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) -> Seq; calculate_start_seq(Db, Node, {Seq, Uuid}) -> @@ -1498,30 +1711,44 @@ calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) -> % Find last replicated sequence from split source to target mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq); false -> - couch_log:warning("~p calculate_start_seq not owner " + couch_log:warning( + "~p calculate_start_seq not owner " "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p", - [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]), + [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)] + ), 0 end; calculate_start_seq(Db, Node, {Seq, Uuid, EpochNode}) -> case is_prefix(Uuid, get_uuid(Db)) of true -> case is_owner(EpochNode, Seq, get_epochs(Db)) of - true -> Seq; + true -> + Seq; false -> %% Shard might have been moved from another node. We %% matched the uuid already, try to find last viable %% sequence we can use - couch_log:warning( "~p calculate_start_seq not owner, " + couch_log:warning( + "~p calculate_start_seq not owner, " " trying replacement db: ~p, seq: ~p, uuid: ~p, " - "epoch_node: ~p, epochs: ~p", [?MODULE, Db#db.name, - Seq, Uuid, EpochNode, get_epochs(Db)]), + "epoch_node: ~p, epochs: ~p", + [ + ?MODULE, + Db#db.name, + Seq, + Uuid, + EpochNode, + get_epochs(Db) + ] + ), calculate_start_seq(Db, Node, {replace, EpochNode, Uuid, Seq}) end; false -> - couch_log:warning("~p calculate_start_seq uuid prefix mismatch " + couch_log:warning( + "~p calculate_start_seq uuid prefix mismatch " "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p", - [?MODULE, Db#db.name, Seq, Uuid, EpochNode]), + [?MODULE, Db#db.name, Seq, Uuid, EpochNode] + ), %% The file was rebuilt, most likely in a different %% order, so rewind. 0 @@ -1531,38 +1758,37 @@ calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) -> true -> try start_seq(get_epochs(Db), OriginalNode, Seq) - catch throw:epoch_mismatch -> - couch_log:warning("~p start_seq duplicate uuid on node: ~p " - "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p", - [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]), - 0 + catch + throw:epoch_mismatch -> + couch_log:warning( + "~p start_seq duplicate uuid on node: ~p " + "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p", + [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode] + ), + 0 end; false -> {replace, OriginalNode, Uuid, Seq} end. - validate_epochs(Epochs) -> %% Assert uniqueness. case length(Epochs) == length(lists:ukeysort(2, Epochs)) of - true -> ok; + true -> ok; false -> erlang:error(duplicate_epoch) end, %% Assert order. case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of - true -> ok; + true -> ok; false -> erlang:error(epoch_order) end. - is_prefix(Pattern, Subject) -> - binary:longest_common_prefix([Pattern, Subject]) == size(Pattern). - + binary:longest_common_prefix([Pattern, Subject]) == size(Pattern). is_owner(Node, Seq, Epochs) -> Node =:= owner_of(Epochs, Seq). - owner_of(Db, Seq) when not is_list(Db) -> owner_of(get_epochs(Db), Seq); owner_of([], _Seq) -> @@ -1572,7 +1798,6 @@ owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq -> owner_of([_ | Rest], Seq) -> owner_of(Rest, Seq). - start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq -> %% OrigNode is the owner of the Seq so we can safely stream from there Seq; @@ -1586,43 +1811,34 @@ start_seq([_ | Rest], OrigNode, Seq) -> start_seq([], _OrigNode, _Seq) -> throw(epoch_mismatch). 
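`owner_of/2` above walks the epoch list, which is ordered newest first, and attributes a sequence to the first epoch it is strictly greater than. A standalone sketch; the two clause bodies falling outside the hunk above (`undefined` for an empty list, `EpochNode` on a match) are assumptions inferred from the callers shown:

```erlang
-module(epochs_demo).
-export([owner_of/2]).

owner_of([], _Seq) ->
    undefined;
owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
    EpochNode;
owner_of([_ | Rest], Seq) ->
    owner_of(Rest, Seq).
```

`owner_of([{n2, 100}, {n1, 0}], 150)` returns `n2`; sequence 50 in the same list belongs to `n1`.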
- fold_docs(Db, UserFun, UserAcc) -> fold_docs(Db, UserFun, UserAcc, []). fold_docs(Db, UserFun, UserAcc, Options) -> couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options). - fold_local_docs(Db, UserFun, UserAcc, Options) -> couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options). - fold_design_docs(Db, UserFun, UserAcc, Options1) -> Options2 = set_design_doc_keys(Options1), couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2). - fold_changes(Db, StartSeq, UserFun, UserAcc) -> fold_changes(Db, StartSeq, UserFun, UserAcc, []). - fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) -> couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts). - fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) -> fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []). - fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) -> couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts). - count_changes_since(Db, SinceSeq) -> couch_db_engine:count_changes_since(Db, SinceSeq). - %%% Internal function %%% open_doc_revs_int(Db, IdRevs, Options) -> Ids = [Id || {Id, _Revs} <- IdRevs], @@ -1630,106 +1846,125 @@ open_doc_revs_int(Db, IdRevs, Options) -> lists:zipwith( fun({Id, Revs}, Lookup) -> case Lookup of - #full_doc_info{rev_tree=RevTree} -> - {FoundRevs, MissingRevs} = - case Revs of - all -> - {couch_key_tree:get_all_leafs(RevTree), []}; - _ -> - case lists:member(latest, Options) of - true -> - couch_key_tree:get_key_leafs(RevTree, Revs); - false -> - couch_key_tree:get(RevTree, Revs) - end - end, - FoundResults = - lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) -> - case Value of - ?REV_MISSING -> - % we have the rev in our list but know nothing about it - {{not_found, missing}, {Pos, Rev}}; - #leaf{deleted=IsDeleted, ptr=SummaryPtr} -> - {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)} - end - end, FoundRevs), - Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs], - {ok, Results}; - not_found when Revs == all -> - {ok, []}; - not_found -> - {ok, [{{not_found, missing}, Rev} || Rev <- Revs]} + #full_doc_info{rev_tree = RevTree} -> + {FoundRevs, MissingRevs} = + case Revs of + all -> + {couch_key_tree:get_all_leafs(RevTree), []}; + _ -> + case lists:member(latest, Options) of + true -> + couch_key_tree:get_key_leafs(RevTree, Revs); + false -> + couch_key_tree:get(RevTree, Revs) + end + end, + FoundResults = + lists:map( + fun({Value, {Pos, [Rev | _]} = FoundRevPath}) -> + case Value of + ?REV_MISSING -> + % we have the rev in our list but know nothing about it + {{not_found, missing}, {Pos, Rev}}; + #leaf{deleted = IsDeleted, ptr = SummaryPtr} -> + {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)} + end + end, + FoundRevs + ), + Results = + FoundResults ++ + [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs], + {ok, Results}; + not_found when Revs == all -> + {ok, []}; + not_found -> + {ok, [{{not_found, missing}, Rev} || Rev <- Revs]} end end, - IdRevs, LookupResults). + IdRevs, + LookupResults + ). 
open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) -> case couch_db_engine:open_local_docs(Db, [Id]) of - [#doc{} = Doc] -> - apply_open_options({ok, Doc}, Options); - [not_found] -> - {not_found, missing} + [#doc{} = Doc] -> + apply_open_options({ok, Doc}, Options); + [not_found] -> + {not_found, missing} end; -open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) -> - #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo, - Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}), +open_doc_int(Db, #doc_info{id = Id, revs = [RevInfo | _]} = DocInfo, Options) -> + #rev_info{deleted = IsDeleted, rev = {Pos, RevId}, body_sp = Bp} = RevInfo, + Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos, [RevId]}), apply_open_options( - {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options); -open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) -> - #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} = + {ok, Doc#doc{meta = doc_meta_info(DocInfo, [], Options)}}, Options + ); +open_doc_int(Db, #full_doc_info{id = Id, rev_tree = RevTree} = FullDocInfo, Options) -> + #doc_info{revs = [#rev_info{deleted = IsDeleted, rev = Rev, body_sp = Bp} | _]} = DocInfo = couch_doc:to_doc_info(FullDocInfo), {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]), Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath), apply_open_options( - {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options); + {ok, Doc#doc{meta = doc_meta_info(DocInfo, RevTree, Options)}}, Options + ); open_doc_int(Db, Id, Options) -> case get_full_doc_info(Db, Id) of - #full_doc_info{} = FullDocInfo -> - open_doc_int(Db, FullDocInfo, Options); - not_found -> - {not_found, missing} + #full_doc_info{} = FullDocInfo -> + open_doc_int(Db, FullDocInfo, Options); + not_found -> + {not_found, missing} end. -doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) -> +doc_meta_info( + #doc_info{high_seq = Seq, revs = [#rev_info{rev = Rev} | RestInfo]}, RevTree, Options +) -> case lists:member(revs_info, Options) of - false -> []; - true -> - {[{Pos, RevPath}],[]} = - couch_key_tree:get_full_key_paths(RevTree, [Rev]), - - [{revs_info, Pos, lists:map( - fun({Rev1, ?REV_MISSING}) -> - {Rev1, missing}; - ({Rev1, Leaf}) -> - case Leaf#leaf.deleted of - true -> - {Rev1, deleted}; - false -> - {Rev1, available} - end - end, RevPath)}] - end ++ - case lists:member(conflicts, Options) of - false -> []; - true -> - case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of - [] -> []; - ConflictRevs -> [{conflicts, ConflictRevs}] - end - end ++ - case lists:member(deleted_conflicts, Options) of - false -> []; - true -> - case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of - [] -> []; - DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}] - end - end ++ - case lists:member(local_seq, Options) of - false -> []; - true -> [{local_seq, Seq}] - end. 
+ false -> + []; + true -> + {[{Pos, RevPath}], []} = + couch_key_tree:get_full_key_paths(RevTree, [Rev]), + [ + {revs_info, Pos, + lists:map( + fun + ({Rev1, ?REV_MISSING}) -> + {Rev1, missing}; + ({Rev1, Leaf}) -> + case Leaf#leaf.deleted of + true -> + {Rev1, deleted}; + false -> + {Rev1, available} + end + end, + RevPath + )} + ] + end ++ + case lists:member(conflicts, Options) of + false -> + []; + true -> + case [Rev1 || #rev_info{rev = Rev1, deleted = false} <- RestInfo] of + [] -> []; + ConflictRevs -> [{conflicts, ConflictRevs}] + end + end ++ + case lists:member(deleted_conflicts, Options) of + false -> + []; + true -> + case [Rev1 || #rev_info{rev = Rev1, deleted = true} <- RestInfo] of + [] -> []; + DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}] + end + end ++ + case lists:member(local_seq, Options) of + false -> []; + true -> [{local_seq, Seq}] + end. make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) -> #doc{ @@ -1747,29 +1982,29 @@ make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) -> body = Bp, deleted = Deleted }), - Doc1 = case Doc0#doc.atts of - BinAtts when is_binary(BinAtts) -> - Doc0#doc{ - atts = couch_compress:decompress(BinAtts) - }; - ListAtts when is_list(ListAtts) -> - Doc0 - end, + Doc1 = + case Doc0#doc.atts of + BinAtts when is_binary(BinAtts) -> + Doc0#doc{ + atts = couch_compress:decompress(BinAtts) + }; + ListAtts when is_list(ListAtts) -> + Doc0 + end, after_doc_read(Db, Doc1#doc{ atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts] }). - after_doc_read(#db{} = Db, Doc) -> DocWithBody = couch_doc:with_ejson_body(Doc), couch_db_plugin:after_doc_read(Db, DocWithBody). increment_stat(#db{options = Options}, Stat) -> case lists:member(sys_db, Options) of - true -> - ok; - false -> - couch_stats:increment_counter(Stat) + true -> + ok; + false -> + couch_stats:increment_counter(Stat) end. -spec normalize_dbname(list() | binary()) -> binary(). @@ -1779,23 +2014,22 @@ normalize_dbname(DbName) when is_list(DbName) -> normalize_dbname(DbName) when is_binary(DbName) -> mem3:dbname(couch_util:drop_dot_couch_ext(DbName)). - -spec dbname_suffix(list() | binary()) -> binary(). dbname_suffix(DbName) -> filename:basename(normalize_dbname(DbName)). - validate_dbname(DbName) when is_list(DbName) -> validate_dbname(?l2b(DbName)); validate_dbname(DbName) when is_binary(DbName) -> Normalized = normalize_dbname(DbName), couch_db_plugin:validate_dbname( - DbName, Normalized, fun validate_dbname_int/2). + DbName, Normalized, fun validate_dbname_int/2 + ). validate_dbname_int(DbName, Normalized) when is_binary(DbName) -> DbNoExt = couch_util:drop_dot_couch_ext(DbName), - case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of + case re:run(DbNoExt, ?DBNAME_REGEX, [{capture, none}, dollar_endonly]) of match -> ok; nomatch -> @@ -1811,76 +2045,81 @@ is_system_db_name(DbName) when is_binary(DbName) -> Normalized = normalize_dbname(DbName), Suffix = filename:basename(Normalized), case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of - {<<".">>, Result} -> Result; - {_Prefix, false} -> false; + {<<".">>, Result} -> + Result; + {_Prefix, false} -> + false; {Prefix, true} -> - ReOpts = [{capture,none}, dollar_endonly], + ReOpts = [{capture, none}, dollar_endonly], re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match end. 
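Taken together, the helpers above strip shard decoration before judging a name: normalize_dbname/1 drops the ".couch" extension and the shards/<range>/ prefix plus timestamp suffix (via mem3:dbname/1), dbname_suffix/1 keeps only the basename, and validate_dbname/1 accepts names that match ?DBNAME_REGEX or appear in ?SYSTEM_DATABASES. A rough shell sketch, with an invented shard path mirroring the tests later in this file:

```
%% Shard range and timestamp are made up for illustration.
Shard = <<"shards/00000000-3fffffff/bills.1415960794.couch">>,
<<"bills">> = couch_db:normalize_dbname(Shard),
<<"bills">> = couch_db:dbname_suffix(Shard),
ok = couch_db:validate_dbname(<<"bills">>),
%% Leading underscores are reserved for system databases:
{error, {illegal_database_name, _}} = couch_db:validate_dbname(<<"_bills">>),
ok = couch_db:validate_dbname(<<"_users">>).
```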
set_design_doc_keys(Options1) -> - Dir = case lists:keyfind(dir, 1, Options1) of - {dir, D0} -> D0; - _ -> fwd - end, + Dir = + case lists:keyfind(dir, 1, Options1) of + {dir, D0} -> D0; + _ -> fwd + end, Options2 = set_design_doc_start_key(Options1, Dir), set_design_doc_end_key(Options2, Dir). - -define(FIRST_DDOC_KEY, <<"_design/">>). -define(LAST_DDOC_KEY, <<"_design0">>). - set_design_doc_start_key(Options, fwd) -> Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY), - Key2 = case Key1 < ?FIRST_DDOC_KEY of - true -> ?FIRST_DDOC_KEY; - false -> Key1 - end, + Key2 = + case Key1 < ?FIRST_DDOC_KEY of + true -> ?FIRST_DDOC_KEY; + false -> Key1 + end, lists:keystore(start_key, 1, Options, {start_key, Key2}); set_design_doc_start_key(Options, rev) -> Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY), - Key2 = case Key1 > ?LAST_DDOC_KEY of - true -> ?LAST_DDOC_KEY; - false -> Key1 - end, + Key2 = + case Key1 > ?LAST_DDOC_KEY of + true -> ?LAST_DDOC_KEY; + false -> Key1 + end, lists:keystore(start_key, 1, Options, {start_key, Key2}). - set_design_doc_end_key(Options, fwd) -> case couch_util:get_value(end_key_gt, Options) of undefined -> Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY), - Key2 = case Key1 > ?LAST_DDOC_KEY of - true -> ?LAST_DDOC_KEY; - false -> Key1 - end, + Key2 = + case Key1 > ?LAST_DDOC_KEY of + true -> ?LAST_DDOC_KEY; + false -> Key1 + end, lists:keystore(end_key, 1, Options, {end_key, Key2}); EKeyGT -> - Key2 = case EKeyGT > ?LAST_DDOC_KEY of - true -> ?LAST_DDOC_KEY; - false -> EKeyGT - end, + Key2 = + case EKeyGT > ?LAST_DDOC_KEY of + true -> ?LAST_DDOC_KEY; + false -> EKeyGT + end, lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2}) end; set_design_doc_end_key(Options, rev) -> case couch_util:get_value(end_key_gt, Options) of undefined -> Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY), - Key2 = case Key1 < ?FIRST_DDOC_KEY of - true -> ?FIRST_DDOC_KEY; - false -> Key1 - end, + Key2 = + case Key1 < ?FIRST_DDOC_KEY of + true -> ?FIRST_DDOC_KEY; + false -> Key1 + end, lists:keystore(end_key, 1, Options, {end_key, Key2}); EKeyGT -> - Key2 = case EKeyGT < ?FIRST_DDOC_KEY of - true -> ?FIRST_DDOC_KEY; - false -> EKeyGT - end, + Key2 = + case EKeyGT < ?FIRST_DDOC_KEY of + true -> ?FIRST_DDOC_KEY; + false -> EKeyGT + end, lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2}) end. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -1900,11 +2139,14 @@ teardown(_) -> validate_dbname_success_test_() -> Cases = - generate_cases_with_shards("long/co$mplex-/path+/something") - ++ generate_cases_with_shards("something") - ++ lists:append( - [generate_cases_with_shards(?b2l(SystemDb)) - || SystemDb <- ?SYSTEM_DATABASES]), + generate_cases_with_shards("long/co$mplex-/path+/something") ++ + generate_cases_with_shards("something") ++ + lists:append( + [ + generate_cases_with_shards(?b2l(SystemDb)) + || SystemDb <- ?SYSTEM_DATABASES + ] + ), { setup, fun setup_all/0, @@ -1918,12 +2160,13 @@ validate_dbname_success_test_() -> }. 
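The clamping above pins any caller-supplied range onto the design-document key space: <<"_design/">> is the lowest possible ddoc key and <<"_design0">> sorts immediately after every <<"_design/...">> key ($0 is the byte after $/), so a fold over the clamped range visits only design docs. For example, inside this module:

```
%% An end_key beyond the ddoc range is pulled back to <<"_design0">>;
%% a start_key already inside the range is kept as-is (dir defaults
%% to fwd when no {dir, _} option is present).
Options = [{start_key, <<"_design/a">>}, {end_key, <<"zzz">>}],
[{start_key, <<"_design/a">>}, {end_key, <<"_design0">>}] =
    set_design_doc_keys(Options).
```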
validate_dbname_fail_test_() -> - Cases = generate_cases("_long/co$mplex-/path+/_something") - ++ generate_cases("_something") - ++ generate_cases_with_shards("long/co$mplex-/path+/_something#") - ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing") - ++ generate_cases("!abcdefg/werwej/_users") - ++ generate_cases_with_shards("!abcdefg/werwej/_users"), + Cases = + generate_cases("_long/co$mplex-/path+/_something") ++ + generate_cases("_something") ++ + generate_cases_with_shards("long/co$mplex-/path+/_something#") ++ + generate_cases_with_shards("long/co$mplex-/path+/some.thing") ++ + generate_cases("!abcdefg/werwej/_users") ++ + generate_cases_with_shards("!abcdefg/werwej/_users"), { setup, fun setup_all/0, @@ -1937,41 +2180,56 @@ validate_dbname_fail_test_() -> }. normalize_dbname_test_() -> - Cases = generate_cases_with_shards("long/co$mplex-/path+/_something") - ++ generate_cases_with_shards("_something"), + Cases = + generate_cases_with_shards("long/co$mplex-/path+/_something") ++ + generate_cases_with_shards("_something"), WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases], - [{test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))} - || {Expected, Db} <- WithExpected]. + [ + {test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))} + || {Expected, Db} <- WithExpected + ]. dbname_suffix_test_() -> - Cases = generate_cases_with_shards("long/co$mplex-/path+/_something") - ++ generate_cases_with_shards("_something"), + Cases = + generate_cases_with_shards("long/co$mplex-/path+/_something") ++ + generate_cases_with_shards("_something"), WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases], - [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))} - || {Expected, Db} <- WithExpected]. + [ + {test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))} + || {Expected, Db} <- WithExpected + ]. is_system_db_name_test_() -> - Cases = lists:append([ - generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db)) - || Db <- ?SYSTEM_DATABASES] - ++ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES - ]), - WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db} - || {Arg, Db} <- Cases], - [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES", - ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected]. + Cases = lists:append( + [ + generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db)) + || Db <- ?SYSTEM_DATABASES + ] ++ + [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES] + ), + WithExpected = [ + {?l2b(filename:basename(filename:rootname(Arg))), Db} + || {Arg, Db} <- Cases + ], + [ + {test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES", ?_assert(is_system_db_name(Db))} + || {Expected, Db} <- WithExpected + ]. should_pass_validate_dbname(DbName) -> {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}. should_fail_validate_dbname(DbName) -> - {test_name(DbName), ?_test(begin - Result = validate_dbname(DbName), - ?assertMatch({error, {illegal_database_name, _}}, Result), - {error, {illegal_database_name, FailedDbName}} = Result, - ?assertEqual(to_binary(DbName), FailedDbName), - ok - end)}. + { + test_name(DbName), + ?_test(begin + Result = validate_dbname(DbName), + ?assertMatch({error, {illegal_database_name, _}}, Result), + {error, {illegal_database_name, FailedDbName}} = Result, + ?assertEqual(to_binary(DbName), FailedDbName), + ok + end) + }. 
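For reference, the generate_cases* fixtures used by these tests pair each logical name with several physical spellings; schematically (shard range and timestamp are invented here, see generate_cases_with_shards/1 near the end of this file):

```
%% generate_cases_with_shards("db") yields {Name, Case} pairs roughly
%% like this, covering list/binary, sharded, and ".couch" variants:
%%   [{"db", "db"},
%%    {"db", <<"db">>},
%%    {"db", "shards/00000000-3fffffff/db.1415960794"},
%%    {"db", <<"shards/00000000-3fffffff/db.1415960794">>},
%%    {"db", "shards/00000000-3fffffff/db.1415960794.couch"},
%%    {"db", <<"shards/00000000-3fffffff/db.1415960794.couch">>}]
```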
calculate_start_seq_test_() -> { @@ -2087,9 +2345,12 @@ generate_cases_with_shards(DbName) -> DbNameWithShard = add_shard(DbName), DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch", Cases = [ - DbName, ?l2b(DbName), - DbNameWithShard, ?l2b(DbNameWithShard), - DbNameWithShardAndExtension, ?l2b(DbNameWithShardAndExtension) + DbName, + ?l2b(DbName), + DbNameWithShard, + ?l2b(DbNameWithShard), + DbNameWithShardAndExtension, + ?l2b(DbNameWithShardAndExtension) ], [{DbName, Case} || Case <- Cases]. diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl index 918dabcca..de4a42495 100644 --- a/src/couch/src/couch_db_engine.erl +++ b/src/couch/src/couch_db_engine.erl @@ -12,11 +12,9 @@ -module(couch_db_engine). - -include("couch_db.hrl"). -include("couch_db_int.hrl"). - -type filepath() :: iolist(). -type docid() :: binary(). -type rev() :: {non_neg_integer(), binary()}. @@ -26,75 +24,82 @@ -type purge_seq() :: non_neg_integer(). -type doc_pair() :: { - #full_doc_info{} | not_found, - #full_doc_info{} | not_found - }. + #full_doc_info{} | not_found, + #full_doc_info{} | not_found +}. -type doc_pairs() :: [doc_pair()]. -type db_open_options() :: [ - create - ]. + create +]. -type delete_options() :: [ - {context, delete | compaction} | - sync - ]. + {context, delete | compaction} + | sync +]. -type purge_info() :: {purge_seq(), uuid(), docid(), revs()}. --type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}]. --type size_info() :: [{Name::atom(), Size::non_neg_integer()}]. +-type epochs() :: [{Node :: atom(), UpdateSeq :: non_neg_integer()}]. +-type size_info() :: [{Name :: atom(), Size :: non_neg_integer()}]. -type partition_info() :: [ - {partition, Partition::binary()} | - {doc_count, DocCount::non_neg_integer()} | - {doc_del_count, DocDelCount::non_neg_integer()} | - {sizes, size_info()} + {partition, Partition :: binary()} + | {doc_count, DocCount :: non_neg_integer()} + | {doc_del_count, DocDelCount :: non_neg_integer()} + | {sizes, size_info()} ]. -type write_stream_options() :: [ - {buffer_size, Size::pos_integer()} | - {encoding, atom()} | - {compression_level, non_neg_integer()} - ]. + {buffer_size, Size :: pos_integer()} + | {encoding, atom()} + | {compression_level, non_neg_integer()} +]. -type doc_fold_options() :: [ - {start_key, Key::any()} | - {end_key, Key::any()} | - {end_key_gt, Key::any()} | - {dir, fwd | rev} | - include_reductions | - include_deleted - ]. + {start_key, Key :: any()} + | {end_key, Key :: any()} + | {end_key_gt, Key :: any()} + | {dir, fwd | rev} + | include_reductions + | include_deleted +]. -type changes_fold_options() :: [ - {dir, fwd | rev} - ]. + {dir, fwd | rev} +]. -type purge_fold_options() :: [ - {start_key, Key::any()} | - {end_key, Key::any()} | - {end_key_gt, Key::any()} | - {dir, fwd | rev} - ]. + {start_key, Key :: any()} + | {end_key, Key :: any()} + | {end_key_gt, Key :: any()} + | {dir, fwd | rev} +]. -type db_handle() :: any(). --type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) -> - {ok, NewUserAcc::any()} | - {stop, NewUserAcc::any()}). - --type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) -> - {ok, NewUserAcc::any()} | - {stop, NewUserAcc::any()}). - --type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) -> - {ok, NewUserAcc::any()} | - {stop, NewUserAcc::any()}). - --type purge_fold_fun() :: fun((purge_info(), UserAcc::any()) -> - {ok, NewUserAcc::any()} | - {stop, NewUserAcc::any()}). 
- +-type doc_fold_fun() :: fun( + (#full_doc_info{}, UserAcc :: any()) -> + {ok, NewUserAcc :: any()} + | {stop, NewUserAcc :: any()} +). + +-type local_doc_fold_fun() :: fun( + (#doc{}, UserAcc :: any()) -> + {ok, NewUserAcc :: any()} + | {stop, NewUserAcc :: any()} +). + +-type changes_fold_fun() :: fun( + (#doc_info{}, UserAcc :: any()) -> + {ok, NewUserAcc :: any()} + | {stop, NewUserAcc :: any()} +). + +-type purge_fold_fun() :: fun( + (purge_info(), UserAcc :: any()) -> + {ok, NewUserAcc :: any()} + | {stop, NewUserAcc :: any()} +). % This is called by couch_server to determine which % engine should be used for the given database. DbPath @@ -102,8 +107,7 @@ % extension for a given engine. The first engine to % return true is the engine that will be used for the % database. --callback exists(DbPath::filepath()) -> boolean(). - +-callback exists(DbPath :: filepath()) -> boolean(). % This is called by couch_server to delete a database. It % is called from inside the couch_server process which @@ -112,11 +116,11 @@ % context. Although since this is executed in the context % of couch_server it should return relatively quickly. -callback delete( - RootDir::filepath(), - DbPath::filepath(), - DelOpts::delete_options()) -> - ok | {error, Reason::atom()}. - + RootDir :: filepath(), + DbPath :: filepath(), + DelOpts :: delete_options() +) -> + ok | {error, Reason :: atom()}. % This function can be called from multiple contexts. It % will either be called just before a call to delete/3 above @@ -125,11 +129,11 @@ % remove any temporary files used during compaction that % may be used to recover from a failed compaction swap. -callback delete_compaction_files( - RootDir::filepath(), - DbPath::filepath(), - DelOpts::delete_options()) -> - ok. - + RootDir :: filepath(), + DbPath :: filepath(), + DelOpts :: delete_options() +) -> + ok. % This is called from the couch_db_updater:init/1 context. As % such this means that it is guaranteed to only have one process @@ -145,41 +149,36 @@ % its guaranteed that the handle will only ever be mutated % in a single threaded context (ie, within the couch_db_updater % process). --callback init(DbPath::filepath(), db_open_options()) -> - {ok, DbHandle::db_handle()}. - +-callback init(DbPath :: filepath(), db_open_options()) -> + {ok, DbHandle :: db_handle()}. % This is called in the context of couch_db_updater:terminate/2 % and as such has the same properties for init/2. It's guaranteed % to be consistent for a given database but may be called by many % databases concurrently. --callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any(). - +-callback terminate(Reason :: any(), DbHandle :: db_handle()) -> Ignored :: any(). % This is called in the context of couch_db_updater:handle_call/3 % for any message that is unknown. It can be used to handle messages % from asynchronous processes like the engine's compactor if it has one. --callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) -> - {reply, Resp::any(), NewDbHandle::db_handle()} | - {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}. - +-callback handle_db_updater_call(Msg :: any(), DbHandle :: db_handle()) -> + {reply, Resp :: any(), NewDbHandle :: db_handle()} + | {stop, Reason :: any(), Resp :: any(), NewDbHandle :: db_handle()}. % This is called in the context of couch_db_updater:handle_info/2 % and has the same properties as handle_call/3. 
--callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) -> - {noreply, NewDbHandle::db_handle()} | - {noreply, NewDbHandle::db_handle(), Timeout::timeout()} | - {stop, Reason::any(), NewDbHandle::db_handle()}. - +-callback handle_db_updater_info(Msg :: any(), DbHandle :: db_handle()) -> + {noreply, NewDbHandle :: db_handle()} + | {noreply, NewDbHandle :: db_handle(), Timeout :: timeout()} + | {stop, Reason :: any(), NewDbHandle :: db_handle()}. % These functions are called by any process opening or closing % a database. As such they need to be able to handle being % called concurrently. For example, the legacy engine uses these % to add monitors to the main engine process. --callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}. --callback decref(DbHandle::db_handle()) -> ok. --callback monitored_by(DbHande::db_handle()) -> [pid()]. - +-callback incref(DbHandle :: db_handle()) -> {ok, NewDbHandle :: db_handle()}. +-callback decref(DbHandle :: db_handle()) -> ok. +-callback monitored_by(DbHande :: db_handle()) -> [pid()]. % This is called in the context of couch_db_updater:handle_info/2 % and should return the timestamp of the last activity of @@ -187,8 +186,7 @@ % value would be hard to report its ok to just return the % result of os:timestamp/0 as this will just disable idle % databases from automatically closing. --callback last_activity(DbHandle::db_handle()) -> erlang:timestamp(). - +-callback last_activity(DbHandle :: db_handle()) -> erlang:timestamp(). % All of the get_* functions may be called from many % processes concurrently. @@ -196,25 +194,21 @@ % The database should make a note of the update sequence when it % was last compacted. If the database doesn't need compacting it % can just hard code a return value of 0. --callback get_compacted_seq(DbHandle::db_handle()) -> - CompactedSeq::non_neg_integer(). - +-callback get_compacted_seq(DbHandle :: db_handle()) -> + CompactedSeq :: non_neg_integer(). % The number of documents in the database which have all leaf % revisions marked as deleted. --callback get_del_doc_count(DbHandle::db_handle()) -> - DelDocCount::non_neg_integer(). - +-callback get_del_doc_count(DbHandle :: db_handle()) -> + DelDocCount :: non_neg_integer(). % This number is reported in the database info properties and % as such can be any JSON value. --callback get_disk_version(DbHandle::db_handle()) -> Version::json(). - +-callback get_disk_version(DbHandle :: db_handle()) -> Version :: json(). % The number of documents in the database that have one or more % leaf revisions not marked as deleted. --callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer(). - +-callback get_doc_count(DbHandle :: db_handle()) -> DocCount :: non_neg_integer(). % The epochs track which node owned the database starting at % a given update sequence. Each time a database is opened it @@ -222,36 +216,29 @@ % for the current node it should add an entry that will be % written the next time a write is performed. An entry is % simply a {node(), CurrentUpdateSeq} tuple. --callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs(). - +-callback get_epochs(DbHandle :: db_handle()) -> Epochs :: epochs(). % Get the current purge sequence known to the engine. This % value should be updated during calls to purge_docs. --callback get_purge_seq(DbHandle::db_handle()) -> purge_seq(). - +-callback get_purge_seq(DbHandle :: db_handle()) -> purge_seq(). 
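The get_* callbacks documented above are plain accessors over whatever state the engine keeps, while the set_* counterparts further down run single-threaded inside couch_db_updater and must hand back an updated handle. A toy, map-backed sketch of that shape (hypothetical module, not a real engine):

```
-module(toy_engine).
-export([get_purge_seq/1, get_revs_limit/1, set_revs_limit/2]).

%% db_handle() is just a map here; a real engine keeps file
%% descriptors, btree states, and so on.
get_purge_seq(#{purge_seq := Seq}) -> Seq.

get_revs_limit(#{revs_limit := Limit}) -> Limit.

set_revs_limit(St, Limit) when is_integer(Limit), Limit > 0 ->
    %% Runs single-threaded in couch_db_updater; return the new
    %% handle so the updater can swap it in.
    {ok, St#{revs_limit := Limit}}.
```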
% Get the oldest purge sequence known to the engine --callback get_oldest_purge_seq(DbHandle::db_handle()) -> purge_seq(). - +-callback get_oldest_purge_seq(DbHandle :: db_handle()) -> purge_seq(). % Get the purged infos limit. This should just return the last % value that was passed to set_purged_docs_limit/2. --callback get_purge_infos_limit(DbHandle::db_handle()) -> pos_integer(). - +-callback get_purge_infos_limit(DbHandle :: db_handle()) -> pos_integer(). % Get the revision limit. This should just return the last % value that was passed to set_revs_limit/2. --callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer(). - +-callback get_revs_limit(DbHandle :: db_handle()) -> RevsLimit :: pos_integer(). % Get the current security properties. This should just return % the last value that was passed to set_security/2. --callback get_security(DbHandle::db_handle()) -> SecProps::any(). - +-callback get_security(DbHandle :: db_handle()) -> SecProps :: any(). % Get the current properties. --callback get_props(DbHandle::db_handle()) -> Props::[any()]. - +-callback get_props(DbHandle :: db_handle()) -> Props :: [any()]. % This information is displayed in the database info poperties. It % should just be a list of {Name::atom(), Size::non_neg_integer()} @@ -266,8 +253,7 @@ % external - Number of bytes that would be required to represent the % contents outside of the database (for capacity and backup % planning) --callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info(). - +-callback get_size_info(DbHandle :: db_handle()) -> SizeInfo :: size_info(). % This returns the information for the given partition. % It should just be a list of {Name::atom(), Size::non_neg_integer()} @@ -277,56 +263,49 @@ % % external - Number of bytes that would be required to represent the % contents of this partition outside of the database --callback get_partition_info(DbHandle::db_handle(), Partition::binary()) -> +-callback get_partition_info(DbHandle :: db_handle(), Partition :: binary()) -> partition_info(). - % The current update sequence of the database. The update % sequence should be incrememnted for every revision added to % the database. --callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer(). - +-callback get_update_seq(DbHandle :: db_handle()) -> UpdateSeq :: non_neg_integer(). % Whenever a database is created it should generate a % persistent UUID for identification in case the shard should % ever need to be moved between nodes in a cluster. --callback get_uuid(DbHandle::db_handle()) -> UUID::binary(). - +-callback get_uuid(DbHandle :: db_handle()) -> UUID :: binary(). % These functions are only called by couch_db_updater and % as such are guaranteed to be single threaded calls. The % database should simply store these values somewhere so % they can be returned by the corresponding get_* calls. --callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) -> - {ok, NewDbHandle::db_handle()}. - +-callback set_revs_limit(DbHandle :: db_handle(), RevsLimit :: pos_integer()) -> + {ok, NewDbHandle :: db_handle()}. --callback set_purge_infos_limit(DbHandle::db_handle(), Limit::pos_integer()) -> - {ok, NewDbHandle::db_handle()}. - - --callback set_security(DbHandle::db_handle(), SecProps::any()) -> - {ok, NewDbHandle::db_handle()}. +-callback set_purge_infos_limit(DbHandle :: db_handle(), Limit :: pos_integer()) -> + {ok, NewDbHandle :: db_handle()}. 
+-callback set_security(DbHandle :: db_handle(), SecProps :: any()) -> + {ok, NewDbHandle :: db_handle()}. % This function is only called by couch_db_updater and % as such is guaranteed to be single threaded calls. The % database should simply store provided property list % unaltered. --callback set_props(DbHandle::db_handle(), Props::any()) -> - {ok, NewDbHandle::db_handle()}. - +-callback set_props(DbHandle :: db_handle(), Props :: any()) -> + {ok, NewDbHandle :: db_handle()}. % Set the current update sequence of the database. The intention is to use this % when copying a database such that the destination update sequence should % match exactly the source update sequence. -callback set_update_seq( - DbHandle::db_handle(), - UpdateSeq::non_neg_integer()) -> - {ok, NewDbHandle::db_handle()}. - + DbHandle :: db_handle(), + UpdateSeq :: non_neg_integer() +) -> + {ok, NewDbHandle :: db_handle()}. % This function will be called by many processes concurrently. % It should return a #full_doc_info{} record or not_found for @@ -337,9 +316,8 @@ % were present in the database when the DbHandle was retrieved % from couch_server. It is currently unknown what would break % if a storage engine deviated from that property. --callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) -> - [#full_doc_info{} | not_found]. - +-callback open_docs(DbHandle :: db_handle(), DocIds :: [docid()]) -> + [#full_doc_info{} | not_found]. % This function will be called by many processes concurrently. % It should return a #doc{} record or not_found for every @@ -349,9 +327,8 @@ % apply to this function (although this function is called % rather less frequently so it may not be as big of an % issue). --callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) -> - [#doc{} | not_found]. - +-callback open_local_docs(DbHandle :: db_handle(), DocIds :: [docid()]) -> + [#doc{} | not_found]. % This function will be called from many contexts concurrently. % The provided RawDoc is a #doc{} record that has its body @@ -360,18 +337,16 @@ % This API exists so that storage engines can store document % bodies externally from the #full_doc_info{} record (which % is the traditional approach and is recommended). --callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) -> - doc(). - +-callback read_doc_body(DbHandle :: db_handle(), RawDoc :: doc()) -> + doc(). % This function will be called from many contexts concurrently. % If the storage engine has a purge_info() record for any of the % provided UUIDs, those purge_info() records should be returned. The % resulting list should have the same length as the input list of % UUIDs. --callback load_purge_infos(DbHandle::db_handle(), [uuid()]) -> - [purge_info() | not_found]. - +-callback load_purge_infos(DbHandle :: db_handle(), [uuid()]) -> + [purge_info() | not_found]. % This function is called concurrently by any client process % that is writing a document. It should accept a #doc{} @@ -382,9 +357,8 @@ % document bodies in parallel by client processes rather % than forcing all compression to occur single threaded % in the context of the couch_db_updater process. --callback serialize_doc(DbHandle::db_handle(), Doc::doc()) -> - doc(). - +-callback serialize_doc(DbHandle :: db_handle(), Doc :: doc()) -> + doc(). % This function is called in the context of a couch_db_updater % which means its single threaded for the given DbHandle. 
@@ -397,9 +371,8 @@ % The BytesWritten return value is used to determine the number % of active bytes in the database which can is used to make % a determination of when to compact this database. --callback write_doc_body(DbHandle::db_handle(), Doc::doc()) -> - {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}. - +-callback write_doc_body(DbHandle :: db_handle(), Doc :: doc()) -> + {ok, FlushedDoc :: doc(), BytesWritten :: non_neg_integer()}. % This function is called from the context of couch_db_updater % and as such is guaranteed single threaded for the given @@ -435,11 +408,11 @@ % batches are non-deterministic (from the point of view of the % client). -callback write_doc_infos( - DbHandle::db_handle(), - Pairs::doc_pairs(), - LocalDocs::[#doc{}]) -> - {ok, NewDbHandle::db_handle()}. - + DbHandle :: db_handle(), + Pairs :: doc_pairs(), + LocalDocs :: [#doc{}] +) -> + {ok, NewDbHandle :: db_handle()}. % This function is called from the context of couch_db_updater % and as such is guaranteed single threaded for the given @@ -470,25 +443,22 @@ % revisions that were requested to be purged. This should be persisted % in such a way that we can efficiently load purge_info() by its UUID % as well as iterate over purge_info() entries in order of their PurgeSeq. --callback purge_docs(DbHandle::db_handle(), [doc_pair()], [purge_info()]) -> - {ok, NewDbHandle::db_handle()}. - +-callback purge_docs(DbHandle :: db_handle(), [doc_pair()], [purge_info()]) -> + {ok, NewDbHandle :: db_handle()}. % This function should be called from a single threaded context and % should be used to copy purge infos from on database to another % when copying a database --callback copy_purge_infos(DbHandle::db_handle(), [purge_info()]) -> - {ok, NewDbHandle::db_handle()}. - +-callback copy_purge_infos(DbHandle :: db_handle(), [purge_info()]) -> + {ok, NewDbHandle :: db_handle()}. % This function is called in the context of couch_db_udpater and % as such is single threaded for any given DbHandle. % % This call is made periodically to ensure that the database has % stored all updates on stable storage. (ie, here is where you fsync). --callback commit_data(DbHandle::db_handle()) -> - {ok, NewDbHande::db_handle()}. - +-callback commit_data(DbHandle :: db_handle()) -> + {ok, NewDbHande :: db_handle()}. % This function is called by multiple processes concurrently. % @@ -502,20 +472,18 @@ % Currently an engine can elect to not implement these API's % by throwing the atom not_supported. -callback open_write_stream( - DbHandle::db_handle(), - Options::write_stream_options()) -> - {ok, pid()}. - + DbHandle :: db_handle(), + Options :: write_stream_options() +) -> + {ok, pid()}. % See the documentation for open_write_stream --callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) -> - {ok, {Module::atom(), ReadStreamState::any()}}. - +-callback open_read_stream(DbHandle :: db_handle(), StreamDiskInfo :: any()) -> + {ok, {Module :: atom(), ReadStreamState :: any()}}. % See the documentation for open_write_stream --callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) -> - boolean(). - +-callback is_active_stream(DbHandle :: db_handle(), ReadStreamState :: any()) -> + boolean(). % This funciton is called by many processes concurrently. % @@ -567,12 +535,12 @@ % that actually happening so a storage engine that includes new results % between invocations shouldn't have any issues. 
-callback fold_docs( - DbHandle::db_handle(), - UserFold::doc_fold_fun(), - UserAcc::any(), - doc_fold_options()) -> - {ok, LastUserAcc::any()}. - + DbHandle :: db_handle(), + UserFold :: doc_fold_fun(), + UserAcc :: any(), + doc_fold_options() +) -> + {ok, LastUserAcc :: any()}. % This function may be called by many processes concurrently. % @@ -580,12 +548,12 @@ % should only return local documents and the first argument to the % user function is a #doc{} record, not a #full_doc_info{}. -callback fold_local_docs( - DbHandle::db_handle(), - UserFold::local_doc_fold_fun(), - UserAcc::any(), - doc_fold_options()) -> - {ok, LastUserAcc::any()}. - + DbHandle :: db_handle(), + UserFold :: local_doc_fold_fun(), + UserAcc :: any(), + doc_fold_options() +) -> + {ok, LastUserAcc :: any()}. % This function may be called by many processes concurrently. % @@ -608,13 +576,13 @@ % The only option currently supported by the API is the `dir` % option that should behave the same as for fold_docs. -callback fold_changes( - DbHandle::db_handle(), - StartSeq::non_neg_integer(), - UserFold::changes_fold_fun(), - UserAcc::any(), - changes_fold_options()) -> - {ok, LastUserAcc::any()}. - + DbHandle :: db_handle(), + StartSeq :: non_neg_integer(), + UserFold :: changes_fold_fun(), + UserAcc :: any(), + changes_fold_options() +) -> + {ok, LastUserAcc :: any()}. % This function may be called by many processes concurrently. % @@ -623,13 +591,13 @@ % % The StartPurgeSeq parameter indicates where the fold should start *after*. -callback fold_purge_infos( - DbHandle::db_handle(), - StartPurgeSeq::purge_seq(), - UserFold::purge_fold_fun(), - UserAcc::any(), - purge_fold_options()) -> - {ok, LastUserAcc::any()}. - + DbHandle :: db_handle(), + StartPurgeSeq :: purge_seq(), + UserFold :: purge_fold_fun(), + UserAcc :: any(), + purge_fold_options() +) -> + {ok, LastUserAcc :: any()}. % This function may be called by many processes concurrently. % @@ -647,10 +615,10 @@ % _active_tasks entry if the storage engine isn't accounted for by the % client. -callback count_changes_since( - DbHandle::db_handle(), - UpdateSeq::non_neg_integer()) -> - TotalChanges::non_neg_integer(). - + DbHandle :: db_handle(), + UpdateSeq :: non_neg_integer() +) -> + TotalChanges :: non_neg_integer(). % This function is called in the context of couch_db_updater and as % such is guaranteed to be single threaded for the given DbHandle. @@ -666,12 +634,12 @@ % must be the same engine that started the compaction and CompactInfo % is an arbitrary term that's passed to finish_compaction/4. -callback start_compaction( - DbHandle::db_handle(), - DbName::binary(), - Options::db_open_options(), - Parent::pid()) -> - {ok, NewDbHandle::db_handle(), CompactorPid::pid()}. - + DbHandle :: db_handle(), + DbName :: binary(), + Options :: db_open_options(), + Parent :: pid() +) -> + {ok, NewDbHandle :: db_handle(), CompactorPid :: pid()}. % This function is called in the context of couch_db_udpater and as % such is guarnateed to be single threaded for the given DbHandle. @@ -683,12 +651,12 @@ % to update the DbHandle state of the couch_db_updater it can as % finish_compaction/4 is called in the context of the couch_db_updater. -callback finish_compaction( - OldDbHandle::db_handle(), - DbName::binary(), - Options::db_open_options(), - CompactInfo::any()) -> - {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}. 
- + OldDbHandle :: db_handle(), + DbName :: binary(), + Options :: db_open_options(), + CompactInfo :: any() +) -> + {ok, CompactedDbHandle :: db_handle(), CompactorPid :: pid() | undefined}. -export([ exists/2, @@ -757,34 +725,29 @@ trigger_on_compact/1 ]). - exists(Engine, DbPath) -> Engine:exists(DbPath). - delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) -> Engine:delete(RootDir, DbPath, DelOpts). - -delete_compaction_files(Engine, RootDir, DbPath, DelOpts) - when is_list(DelOpts) -> +delete_compaction_files(Engine, RootDir, DbPath, DelOpts) when + is_list(DelOpts) +-> Engine:delete_compaction_files(RootDir, DbPath, DelOpts). - init(Engine, DbPath, Options) -> case Engine:init(DbPath, Options) of - {ok, EngineState} -> - {ok, {Engine, EngineState}}; - Error -> - throw(Error) + {ok, EngineState} -> + {ok, {Engine, EngineState}}; + Error -> + throw(Error) end. - terminate(Reason, #db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:terminate(Reason, EngineState). - handle_db_updater_call(Msg, _From, #db{} = Db) -> #db{ engine = {Engine, EngineState} @@ -796,7 +759,6 @@ handle_db_updater_call(Msg, _From, #db{} = Db) -> {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}} end. - handle_db_updater_info(Msg, #db{} = Db) -> #db{ name = Name, @@ -812,98 +774,79 @@ handle_db_updater_info(Msg, #db{} = Db) -> {stop, Reason, Db#db{engine = {Engine, NewState}}} end. - incref(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewState} = Engine:incref(EngineState), {ok, Db#db{engine = {Engine, NewState}}}. - decref(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:decref(EngineState). - monitored_by(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:monitored_by(EngineState). - last_activity(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:last_activity(EngineState). - get_engine(#db{} = Db) -> #db{engine = {Engine, _}} = Db, Engine. - get_compacted_seq(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_compacted_seq(EngineState). - get_del_doc_count(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_del_doc_count(EngineState). - get_disk_version(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_disk_version(EngineState). - get_doc_count(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_doc_count(EngineState). - get_epochs(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_epochs(EngineState). - get_purge_seq(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_purge_seq(EngineState). - get_oldest_purge_seq(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_oldest_purge_seq(EngineState). - get_purge_infos_limit(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_purge_infos_limit(EngineState). - get_revs_limit(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_revs_limit(EngineState). - get_security(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_security(EngineState). - get_props(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_props(EngineState). - get_size_info(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_size_info(EngineState). - get_partition_info(#db{} = Db, Partition) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_partition_info(EngineState, Partition). - get_update_seq(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_update_seq(EngineState). 
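Every exported wrapper in this module, above and continuing below, keeps to one dispatch idiom: unpack the {Engine, EngineState} pair stored in #db.engine, call the callback on the engine module, and, for mutating operations, fold the returned state back into the record. Schematically (get_x and set_x are placeholder names):

```
%% Read-only dispatch: engine state passes through untouched.
get_x(#db{engine = {Engine, EngineState}}) ->
    Engine:get_x(EngineState).

%% Mutating dispatch: the new engine state is rewrapped into #db{}.
set_x(#db{engine = {Engine, EngineState}} = Db, Value) ->
    {ok, NewState} = Engine:set_x(EngineState, Value),
    {ok, Db#db{engine = {Engine, NewState}}}.
```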
@@ -912,134 +855,113 @@ get_uuid(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, Engine:get_uuid(EngineState). - set_revs_limit(#db{} = Db, RevsLimit) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit), {ok, Db#db{engine = {Engine, NewSt}}}. - set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit), {ok, Db#db{engine = {Engine, NewSt}}}. - set_security(#db{} = Db, SecProps) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:set_security(EngineState, SecProps), {ok, Db#db{engine = {Engine, NewSt}}}. - set_props(#db{} = Db, Props) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:set_props(EngineState, Props), {ok, Db#db{engine = {Engine, NewSt}}}. - set_update_seq(#db{} = Db, UpdateSeq) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq), {ok, Db#db{engine = {Engine, NewSt}}}. - open_docs(#db{} = Db, DocIds) -> #db{engine = {Engine, EngineState}} = Db, Engine:open_docs(EngineState, DocIds). - open_local_docs(#db{} = Db, DocIds) -> #db{engine = {Engine, EngineState}} = Db, Engine:open_local_docs(EngineState, DocIds). - read_doc_body(#db{} = Db, RawDoc) -> #db{engine = {Engine, EngineState}} = Db, Engine:read_doc_body(EngineState, RawDoc). - load_purge_infos(#db{} = Db, UUIDs) -> #db{engine = {Engine, EngineState}} = Db, Engine:load_purge_infos(EngineState, UUIDs). - serialize_doc(#db{} = Db, #doc{} = Doc) -> #db{engine = {Engine, EngineState}} = Db, Engine:serialize_doc(EngineState, Doc). - write_doc_body(#db{} = Db, #doc{} = Doc) -> #db{engine = {Engine, EngineState}} = Db, Engine:write_doc_body(EngineState, Doc). - write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs), {ok, Db#db{engine = {Engine, NewSt}}}. - purge_docs(#db{} = Db, DocUpdates, Purges) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:purge_docs( - EngineState, DocUpdates, Purges), + EngineState, DocUpdates, Purges + ), {ok, Db#db{engine = {Engine, NewSt}}}. - copy_purge_infos(#db{} = Db, Purges) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:copy_purge_infos( - EngineState, Purges), + EngineState, Purges + ), {ok, Db#db{engine = {Engine, NewSt}}}. - commit_data(#db{} = Db) -> #db{engine = {Engine, EngineState}} = Db, {ok, NewSt} = Engine:commit_data(EngineState), {ok, Db#db{engine = {Engine, NewSt}}}. - open_write_stream(#db{} = Db, Options) -> #db{engine = {Engine, EngineState}} = Db, Engine:open_write_stream(EngineState, Options). - open_read_stream(#db{} = Db, StreamDiskInfo) -> #db{engine = {Engine, EngineState}} = Db, Engine:open_read_stream(EngineState, StreamDiskInfo). - is_active_stream(#db{} = Db, ReadStreamState) -> #db{engine = {Engine, EngineState}} = Db, Engine:is_active_stream(EngineState, ReadStreamState). - fold_docs(#db{} = Db, UserFun, UserAcc, Options) -> #db{engine = {Engine, EngineState}} = Db, Engine:fold_docs(EngineState, UserFun, UserAcc, Options). - fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) -> #db{engine = {Engine, EngineState}} = Db, Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options). - fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) -> #db{engine = {Engine, EngineState}} = Db, Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options). 
- fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) -> #db{engine = {Engine, EngineState}} = Db, Engine:fold_purge_infos( - EngineState, StartPurgeSeq, UserFun, UserAcc, Options). - + EngineState, StartPurgeSeq, UserFun, UserAcc, Options + ). count_changes_since(#db{} = Db, StartSeq) -> #db{engine = {Engine, EngineState}} = Db, Engine:count_changes_since(EngineState, StartSeq). - start_compaction(#db{} = Db) -> #db{ engine = {Engine, EngineState}, @@ -1047,50 +969,53 @@ start_compaction(#db{} = Db) -> options = Options } = Db, {ok, NewEngineState, Pid} = Engine:start_compaction( - EngineState, DbName, Options, self()), + EngineState, DbName, Options, self() + ), {ok, Db#db{ engine = {Engine, NewEngineState}, compactor_pid = Pid }}. - finish_compaction(Db, CompactInfo) -> #db{ engine = {Engine, St}, name = DbName, options = Options } = Db, - NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of - {ok, NewState, undefined} -> - couch_event:notify(DbName, compacted), - Db#db{ - engine = {Engine, NewState}, - compactor_pid = nil - }; - {ok, NewState, CompactorPid} when is_pid(CompactorPid) -> - Db#db{ - engine = {Engine, NewState}, - compactor_pid = CompactorPid - } - end, + NewDb = + case Engine:finish_compaction(St, DbName, Options, CompactInfo) of + {ok, NewState, undefined} -> + couch_event:notify(DbName, compacted), + Db#db{ + engine = {Engine, NewState}, + compactor_pid = nil + }; + {ok, NewState, CompactorPid} when is_pid(CompactorPid) -> + Db#db{ + engine = {Engine, NewState}, + compactor_pid = CompactorPid + } + end, ok = couch_server:db_updated(NewDb), {ok, NewDb}. - trigger_on_compact(DbName) -> {ok, DDocs} = get_ddocs(DbName), couch_db_plugin:on_compact(DbName, DDocs). - get_ddocs(<<"shards/", _/binary>> = DbName) -> {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(mem3:dbname(DbName))) end), receive {'DOWN', Ref, _, _, {ok, JsonDDocs}} -> - {ok, lists:map(fun(JsonDDoc) -> - couch_doc:from_json_obj(JsonDDoc) - end, JsonDDocs)}; + {ok, + lists:map( + fun(JsonDDoc) -> + couch_doc:from_json_obj(JsonDDoc) + end, + JsonDDocs + )}; {'DOWN', Ref, _, _, Else} -> Else end; diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl index 21879f683..870202bad 100644 --- a/src/couch/src/couch_db_epi.erl +++ b/src/couch/src/couch_db_epi.erl @@ -32,7 +32,6 @@ providers() -> {chttpd_handlers, couch_httpd_handlers} ]. - services() -> [ {couch_db, couch_db_plugin}, diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl index 355364f9b..9c81ba6d0 100644 --- a/src/couch/src/couch_db_header.erl +++ b/src/couch/src/couch_db_header.erl @@ -12,7 +12,6 @@ -module(couch_db_header). - -export([ new/0, from/1, @@ -37,7 +36,6 @@ compacted_seq/1 ]). - % This should be updated anytime a header change happens that requires more % than filling in new defaults. % @@ -66,14 +64,12 @@ compacted_seq }). - new() -> #db_header{ uuid = couch_uuids:random(), epochs = [{node(), 0}] }. - from(Header0) -> Header = upgrade(Header0), #db_header{ @@ -82,16 +78,15 @@ from(Header0) -> compacted_seq = Header#db_header.compacted_seq }. - is_header(Header) -> try upgrade(Header), true - catch _:_ -> - false + catch + _:_ -> + false end. - upgrade(Header) -> Funs = [ fun upgrade_tuple/1, @@ -100,69 +95,63 @@ upgrade(Header) -> fun upgrade_epochs/1, fun upgrade_compacted_seq/1 ], - lists:foldl(fun(F, HdrAcc) -> - F(HdrAcc) - end, Header, Funs). - + lists:foldl( + fun(F, HdrAcc) -> + F(HdrAcc) + end, + Header, + Funs + ). 
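One idiom from the couch_db_engine hunk above deserves a note: get_ddocs/1 runs its fabric call in a throwaway monitored process and receives the result as that process's exit reason, carried by the 'DOWN' message. The shape in isolation (call_isolated is a hypothetical name):

```
%% Run Fun in a short-lived process; its return value (or crash
%% reason) comes back as the Reason field of the 'DOWN' message.
call_isolated(Fun) ->
    {_Pid, Ref} = spawn_monitor(fun() -> exit(Fun()) end),
    receive
        {'DOWN', Ref, process, _FromPid, Result} -> Result
    end.
```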
set(Header0, Fields) -> % A subtlety here is that if a database was open during % the release upgrade that updates to uuids and epochs then % this dynamic upgrade also assigns a uuid and epoch. Header = upgrade(Header0), - lists:foldl(fun({Field, Value}, HdrAcc) -> - set_field(HdrAcc, Field, Value) - end, Header, Fields). - + lists:foldl( + fun({Field, Value}, HdrAcc) -> + set_field(HdrAcc, Field, Value) + end, + Header, + Fields + ). disk_version(Header) -> get_field(Header, disk_version). - update_seq(Header) -> get_field(Header, update_seq). - id_tree_state(Header) -> get_field(Header, id_tree_state). - seq_tree_state(Header) -> get_field(Header, seq_tree_state). - local_tree_state(Header) -> get_field(Header, local_tree_state). - purge_seq(Header) -> get_field(Header, purge_seq). - purged_docs(Header) -> get_field(Header, purged_docs). - security_ptr(Header) -> get_field(Header, security_ptr). - revs_limit(Header) -> get_field(Header, revs_limit). - uuid(Header) -> get_field(Header, uuid). - epochs(Header) -> get_field(Header, epochs). - compacted_seq(Header) -> get_field(Header, compacted_seq). - get_field(Header, Field) -> Idx = index(Field), case Idx > tuple_size(Header) of @@ -170,88 +159,97 @@ get_field(Header, Field) -> false -> element(index(Field), Header) end. - set_field(Header, Field, Value) -> setelement(index(Field), Header, Value). - index(Field) -> couch_util:get_value(Field, indexes()). - indexes() -> Fields = record_info(fields, db_header), Indexes = lists:seq(2, record_info(size, db_header)), lists:zip(Fields, Indexes). - upgrade_tuple(Old) when is_record(Old, db_header) -> Old; upgrade_tuple(Old) when is_tuple(Old) -> NewSize = record_info(size, db_header), - if tuple_size(Old) < NewSize -> ok; true -> - erlang:error({invalid_header_size, Old}) + if + tuple_size(Old) < NewSize -> ok; + true -> erlang:error({invalid_header_size, Old}) end, - {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) -> - {Idx+1, setelement(Idx, Hdr, Val)} - end, {1, #db_header{}}, tuple_to_list(Old)), - if is_record(New, db_header) -> ok; true -> - erlang:error({invalid_header_extension, {Old, New}}) + {_, New} = lists:foldl( + fun(Val, {Idx, Hdr}) -> + {Idx + 1, setelement(Idx, Hdr, Val)} + end, + {1, #db_header{}}, + tuple_to_list(Old) + ), + if + is_record(New, db_header) -> ok; + true -> erlang:error({invalid_header_extension, {Old, New}}) end, New. -define(OLD_DISK_VERSION_ERROR, - "Database files from versions smaller than 0.10.0 are no longer supported"). + "Database files from versions smaller than 0.10.0 are no longer supported" +). -upgrade_disk_version(#db_header{}=Header) -> +upgrade_disk_version(#db_header{} = Header) -> case element(2, Header) of - 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); - 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); - 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); - 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11) - 5 -> Header; % pre 1.2 - ?LATEST_DISK_VERSION -> Header; + 1 -> + throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); + 2 -> + throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); + 3 -> + throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); + % [0.10 - 0.11) + 4 -> + Header#db_header{security_ptr = nil}; + % pre 1.2 + 5 -> + Header; + ?LATEST_DISK_VERSION -> + Header; _ -> Reason = "Incorrect disk header version", throw({database_disk_version_error, Reason}) end. 
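get_field/2 and index/1 above read header fields positionally via record_info, which is what lets a header tuple written by an older release, shorter than today's #db_header{}, read back as undefined for the fields it predates. The same trick on a hypothetical two-field record:

```
-record(hdr, {vsn, uuid}).

%% Field name -> tuple position (element 1 is the record tag).
indexes() ->
    Fields = record_info(fields, hdr),    % [vsn, uuid]
    lists:zip(Fields, lists:seq(2, record_info(size, hdr))).

get_field(Header, Field) ->
    {Field, Idx} = lists:keyfind(Field, 1, indexes()),
    case Idx > tuple_size(Header) of
        true -> undefined;                % tuple predates this field
        false -> element(Idx, Header)
    end.
```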
- -upgrade_uuid(#db_header{}=Header) -> +upgrade_uuid(#db_header{} = Header) -> case Header#db_header.uuid of undefined -> % Upgrading this old db file to a newer % on disk format that includes a UUID. - Header#db_header{uuid=couch_uuids:random()}; + Header#db_header{uuid = couch_uuids:random()}; _ -> Header end. - -upgrade_epochs(#db_header{}=Header) -> - NewEpochs = case Header#db_header.epochs of - undefined -> - % This node is taking over ownership of shard with - % and old version of couch file. Before epochs there - % was always an implicit assumption that a file was - % owned since eternity by the node it was on. This - % just codifies that assumption. - [{node(), 0}]; - [{Node, _} | _] = Epochs0 when Node == node() -> - % Current node is the current owner of this db - Epochs0; - Epochs1 -> - % This node is taking over ownership of this db - % and marking the update sequence where it happened. - [{node(), Header#db_header.update_seq} | Epochs1] - end, +upgrade_epochs(#db_header{} = Header) -> + NewEpochs = + case Header#db_header.epochs of + undefined -> + % This node is taking over ownership of shard with + % and old version of couch file. Before epochs there + % was always an implicit assumption that a file was + % owned since eternity by the node it was on. This + % just codifies that assumption. + [{node(), 0}]; + [{Node, _} | _] = Epochs0 when Node == node() -> + % Current node is the current owner of this db + Epochs0; + Epochs1 -> + % This node is taking over ownership of this db + % and marking the update sequence where it happened. + [{node(), Header#db_header.update_seq} | Epochs1] + end, % Its possible for a node to open a db and claim % ownership but never make a write to the db. This % removes nodes that claimed ownership but never % changed the database. DedupedEpochs = remove_dup_epochs(NewEpochs), - Header#db_header{epochs=DedupedEpochs}. - + Header#db_header{epochs = DedupedEpochs}. % This is slightly relying on the udpate_seq's being sorted % in epochs due to how we only ever push things onto the @@ -260,12 +258,12 @@ upgrade_epochs(#db_header{}=Header) -> % want to remove dupes (by calling a sort on the input to this % function). So for now we don't sort but are relying on the % idea that epochs is always sorted. -remove_dup_epochs([_]=Epochs) -> +remove_dup_epochs([_] = Epochs) -> Epochs; remove_dup_epochs([{N1, S}, {_N2, S}]) -> % Seqs match, keep the most recent owner [{N1, S}]; -remove_dup_epochs([_, _]=Epochs) -> +remove_dup_epochs([_, _] = Epochs) -> % Seqs don't match. Epochs; remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) -> @@ -275,11 +273,10 @@ remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) -> % Seqs don't match, recurse to check others [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])]. - -upgrade_compacted_seq(#db_header{}=Header) -> +upgrade_compacted_seq(#db_header{} = Header) -> case Header#db_header.compacted_seq of undefined -> - Header#db_header{compacted_seq=0}; + Header#db_header{compacted_seq = 0}; _ -> Header end. 
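A worked example of remove_dup_epochs/1 above, relying on the newest-first ordering the comment describes: a node that claimed ownership but never wrote shares its predecessor's update sequence and is dropped, while claims at distinct sequences survive.

```
%% n2 claimed at seq 100 without writing, then n3 took over at the
%% same seq: the stale n2 claim is removed, the newest owner kept.
[{n3, 100}, {n1, 0}] =
    remove_dup_epochs([{n3, 100}, {n2, 100}, {n1, 0}]),
%% Distinct seqs are left untouched:
[{n2, 200}, {n1, 100}] =
    remove_dup_epochs([{n2, 200}, {n1, 100}]).
```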
@@ -296,20 +293,30 @@ latest(_Else) -> mk_header(Vsn) -> { - db_header, % record name - Vsn, % disk version - 100, % update_seq - 0, % unused - foo, % id_tree_state - bar, % seq_tree_state - bam, % local_tree_state - 1, % purge_seq - baz, % purged_docs - bang, % security_ptr - 999 % revs_limit + % record name + db_header, + % disk version + Vsn, + % update_seq + 100, + % unused + 0, + % id_tree_state + foo, + % seq_tree_state + bar, + % local_tree_state + bam, + % purge_seq + 1, + % purged_docs + baz, + % security_ptr + bang, + % revs_limit + 999 }. - upgrade_v3_test() -> Vsn3Header = mk_header(3), NewHeader = upgrade_tuple(Vsn3Header), @@ -328,9 +335,10 @@ upgrade_v3_test() -> ?assertEqual(undefined, uuid(NewHeader)), ?assertEqual(undefined, epochs(NewHeader)), - ?assertThrow({database_disk_version_error, _}, - upgrade_disk_version(NewHeader)). - + ?assertThrow( + {database_disk_version_error, _}, + upgrade_disk_version(NewHeader) + ). upgrade_v5_test() -> Vsn5Header = mk_header(5), @@ -342,7 +350,6 @@ upgrade_v5_test() -> % Security ptr isn't changed for v5 headers ?assertEqual(bang, security_ptr(NewHeader)). - upgrade_uuid_test() -> Vsn5Header = mk_header(5), @@ -358,7 +365,6 @@ upgrade_uuid_test() -> ResetHeader = from(NewNewHeader), ?assertEqual(uuid(NewHeader), uuid(ResetHeader)). - upgrade_epochs_test() -> Vsn5Header = mk_header(5), @@ -391,15 +397,12 @@ upgrade_epochs_test() -> ResetHeader = from(NewNewHeader), ?assertEqual(OwnedEpochs, epochs(ResetHeader)). - get_uuid_from_old_header_test() -> Vsn5Header = mk_header(5), ?assertEqual(undefined, uuid(Vsn5Header)). - get_epochs_from_old_header_test() -> Vsn5Header = mk_header(5), ?assertEqual(undefined, epochs(Vsn5Header)). - -endif. diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl index c3684c6e3..c84edc1b7 100644 --- a/src/couch/src/couch_db_plugin.erl +++ b/src/couch/src/couch_db_plugin.erl @@ -87,10 +87,10 @@ do_apply(Func, Args, Opts) -> maybe_handle(Func, Args, Default) -> Handle = couch_epi:get_handle(?SERVICE_ID), case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of - no_decision when is_function(Default) -> - apply(Default, Args); - no_decision -> - Default; - {decided, Result} -> - Result + no_decision when is_function(Default) -> + apply(Default, Args); + no_decision -> + Default; + {decided, Result} -> + Result end. diff --git a/src/couch/src/couch_db_split.erl b/src/couch/src/couch_db_split.erl index 1aa86fb37..d219e3731 100644 --- a/src/couch/src/couch_db_split.erl +++ b/src/couch/src/couch_db_split.erl @@ -12,20 +12,16 @@ -module(couch_db_split). - -export([ split/3, copy_local_docs/3, cleanup_target/2 ]). - -include_lib("couch/include/couch_db.hrl"). - -define(DEFAULT_BUFFER_SIZE, 16777216). - -record(state, { source_db, source_uuid, @@ -51,11 +47,11 @@ atts = [] }). - % Public API split(Source, #{} = Targets, PickFun) when - map_size(Targets) >= 2, is_function(PickFun, 3) -> + map_size(Targets) >= 2, is_function(PickFun, 3) +-> case couch_db:open_int(Source, [?ADMIN_CTX]) of {ok, SourceDb} -> Engine = get_engine(SourceDb), @@ -74,16 +70,19 @@ split(Source, #{} = Targets, PickFun) when {error, missing_source} end. 
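For orientation, a hypothetical invocation of split/3 above: Targets maps arbitrary keys to target shard names, and PickFun(DocId, TargetKeys, HashFun) routes each doc id to one of those keys. The shard names and the phash2-based pick fun below are invented for illustration; a real caller would use the {M, F, A} HashFun it is handed.

```
Targets = #{
    t1 => <<"shards/00000000-7fffffff/db.1636000000">>,
    t2 => <<"shards/80000000-ffffffff/db.1636000000">>
},
PickFun = fun(DocId, TargetKeys, _HashFun) ->
    %% Deterministically map the doc id onto one of the target keys.
    lists:nth(erlang:phash2(DocId, length(TargetKeys)) + 1, TargetKeys)
end,
{ok, _UpdateSeq} =
    couch_db_split:split(<<"shards/00000000-ffffffff/db.1636000000">>,
                         Targets, PickFun).
```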
- copy_local_docs(Source, #{} = Targets0, PickFun) when - is_binary(Source), is_function(PickFun, 3) -> + is_binary(Source), is_function(PickFun, 3) +-> case couch_db:open_int(Source, [?ADMIN_CTX]) of {ok, SourceDb} -> try - Targets = maps:map(fun(_, DbName) -> - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - #target{db = Db, uuid = couch_db:get_uuid(Db)} - end, Targets0), + Targets = maps:map( + fun(_, DbName) -> + {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), + #target{db = Db, uuid = couch_db:get_uuid(Db)} + end, + Targets0 + ), SourceName = couch_db:name(SourceDb), try State = #state{ @@ -96,10 +95,13 @@ copy_local_docs(Source, #{} = Targets0, PickFun) when copy_local_docs(State), ok after - maps:map(fun(_, #target{db = Db} = T) -> - couch_db:close(Db), - T#target{db = undefined} - end, Targets) + maps:map( + fun(_, #target{db = Db} = T) -> + couch_db:close(Db), + T#target{db = undefined} + end, + Targets + ) end after couch_db:close(SourceDb) @@ -108,7 +110,6 @@ copy_local_docs(Source, #{} = Targets0, PickFun) when {error, missing_source} end. - cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) -> case couch_db:open_int(Source, [?ADMIN_CTX]) of {ok, SourceDb} -> @@ -121,35 +122,40 @@ cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) -> {error, missing_source} end. - % Private Functions split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) -> - Targets = maps:fold(fun(Key, DbName, Map) -> - case couch_db:validate_dbname(DbName) of - ok -> - ok; - {error, E} -> - throw({target_create_error, DbName, E, Map}) + Targets = maps:fold( + fun(Key, DbName, Map) -> + case couch_db:validate_dbname(DbName) of + ok -> + ok; + {error, E} -> + throw({target_create_error, DbName, E, Map}) + end, + case couch_server:lock(DbName, <<"shard splitting">>) of + ok -> + ok; + {error, Err} -> + throw({target_create_error, DbName, Err, Map}) + end, + {ok, Filepath} = couch_server:get_engine_path(DbName, Engine), + Opts = + [create, ?ADMIN_CTX] ++ + case Partitioned of + true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}]; + false -> [] + end, + case couch_db:start_link(Engine, DbName, Filepath, Opts) of + {ok, Db} -> + Map#{Key => #target{db = Db}}; + {error, Error} -> + throw({target_create_error, DbName, Error, Map}) + end end, - case couch_server:lock(DbName, <<"shard splitting">>) of - ok -> - ok; - {error, Err} -> - throw({target_create_error, DbName, Err, Map}) - end, - {ok, Filepath} = couch_server:get_engine_path(DbName, Engine), - Opts = [create, ?ADMIN_CTX] ++ case Partitioned of - true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}]; - false -> [] - end, - case couch_db:start_link(Engine, DbName, Filepath, Opts) of - {ok, Db} -> - Map#{Key => #target{db = Db}}; - {error, Error} -> - throw({target_create_error, DbName, Error, Map}) - end - end, #{}, Targets0), + #{}, + Targets0 + ), Seq = couch_db:get_update_seq(SourceDb), State1 = #state{ source_db = SourceDb, @@ -166,24 +172,27 @@ split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) -> stop_targets(State6#state.targets), {ok, Seq}. - cleanup_targets(#{} = Targets, Engine) -> - maps:map(fun(_, #target{db = Db} = T) -> - ok = stop_target_db(Db), - DbName = couch_db:name(Db), - delete_target(DbName, Engine), - couch_server:unlock(DbName), - T - end, Targets). 
- + maps:map( + fun(_, #target{db = Db} = T) -> + ok = stop_target_db(Db), + DbName = couch_db:name(Db), + delete_target(DbName, Engine), + couch_server:unlock(DbName), + T + end, + Targets + ). stop_targets(#{} = Targets) -> - maps:map(fun(_, #target{db = Db} = T) -> - {ok, Db1} = couch_db_engine:commit_data(Db), - ok = stop_target_db(Db1), - T - end, Targets). - + maps:map( + fun(_, #target{db = Db} = T) -> + {ok, Db1} = couch_db_engine:commit_data(Db), + ok = stop_target_db(Db1), + T + end, + Targets + ). stop_target_db(Db) -> couch_db:close(Db), @@ -193,84 +202,91 @@ stop_target_db(Db) -> couch_server:unlock(couch_db:name(Db)), ok. - delete_target(DbName, Engine) -> RootDir = config:get("couchdb", "database_dir", "."), {ok, Filepath} = couch_server:get_engine_path(DbName, Engine), DelOpt = [{context, compaction}, sync], couch_db_engine:delete(Engine, RootDir, Filepath, DelOpt). - pick_target(DocId, #state{} = State, #{} = Targets) -> #state{pickfun = PickFun, hashfun = HashFun} = State, Key = PickFun(DocId, maps:keys(Targets), HashFun), {Key, maps:get(Key, Targets)}. - set_targets_update_seq(#state{targets = Targets} = State) -> Seq = couch_db:get_update_seq(State#state.source_db), - Targets1 = maps:map(fun(_, #target{db = Db} = Target) -> - {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq), - Target#target{db = Db1} - end, Targets), + Targets1 = maps:map( + fun(_, #target{db = Db} = Target) -> + {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq), + Target#target{db = Db1} + end, + Targets + ), State#state{targets = Targets1}. - copy_checkpoints(#state{} = State) -> #state{source_db = Db, source_uuid = SrcUUID, targets = Targets} = State, FoldFun = fun(#doc{id = Id} = Doc, Acc) -> - UpdatedAcc = case Id of - <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> -> - % Transform mem3 internal replicator checkpoints to avoid - % rewinding the changes feed when it sees the new shards - maps:map(fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) -> - Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc), - T#target{buffer = [Doc1 | Docs]} - end, Acc); - <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> -> - % Copy purge checkpoints to all shards - maps:map(fun(_, #target{buffer = Docs} = T) -> - T#target{buffer = [Doc | Docs]} - end, Acc); - <<?LOCAL_DOC_PREFIX, _/binary>> -> - % Skip copying these that will be done during - % local docs top off right before the shards are switched - Acc - end, + UpdatedAcc = + case Id of + <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> -> + % Transform mem3 internal replicator checkpoints to avoid + % rewinding the changes feed when it sees the new shards + maps:map( + fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) -> + Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc), + T#target{buffer = [Doc1 | Docs]} + end, + Acc + ); + <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> -> + % Copy purge checkpoints to all shards + maps:map( + fun(_, #target{buffer = Docs} = T) -> + T#target{buffer = [Doc | Docs]} + end, + Acc + ); + <<?LOCAL_DOC_PREFIX, _/binary>> -> + % Skip copying these that will be done during + % local docs top off right before the shards are switched + Acc + end, {ok, UpdatedAcc} end, {ok, Targets1} = couch_db_engine:fold_local_docs(Db, FoldFun, Targets, []), - Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) -> - case Docs of - [] -> - T; - [_ | _] -> - Docs1 = lists:reverse(Docs), - {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1), - {ok, TDb2} = couch_db_engine:commit_data(TDb1), - T#target{db = TDb2, buffer = []} - end - end, 
Targets1), + Targets2 = maps:map( + fun(_, #target{db = TDb, buffer = Docs} = T) -> + case Docs of + [] -> + T; + [_ | _] -> + Docs1 = lists:reverse(Docs), + {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1), + {ok, TDb2} = couch_db_engine:commit_data(TDb1), + T#target{db = TDb2, buffer = []} + end + end, + Targets1 + ), State#state{targets = Targets2}. - update_checkpoint_doc(Old, New, #doc{body = {Props}} = Doc) -> - NewProps = case couch_util:get_value(<<"target_uuid">>, Props) of - Old -> - replace_kv(Props, {<<"target_uuid">>, Old, New}); - Other when is_binary(Other) -> - replace_kv(Props, {<<"source_uuid">>, Old, New}) - end, + NewProps = + case couch_util:get_value(<<"target_uuid">>, Props) of + Old -> + replace_kv(Props, {<<"target_uuid">>, Old, New}); + Other when is_binary(Other) -> + replace_kv(Props, {<<"source_uuid">>, Old, New}) + end, NewId = update_checkpoint_id(Doc#doc.id, Old, New), Doc#doc{id = NewId, body = {NewProps}}. - update_checkpoint_id(Id, Old, New) -> OldHash = mem3_rep:local_id_hash(Old), NewHash = mem3_rep:local_id_hash(New), binary:replace(Id, OldHash, NewHash). - replace_kv({[]}, _) -> {[]}; replace_kv({KVs}, Replacement) -> @@ -286,30 +302,33 @@ replace_kv({K, V}, Replacement) -> replace_kv(V, _) -> V. - copy_meta(#state{source_db = SourceDb, targets = Targets} = State) -> RevsLimit = couch_db:get_revs_limit(SourceDb), {SecProps} = couch_db:get_security(SourceDb), PurgeLimit = couch_db:get_purge_infos_limit(SourceDb), - Targets1 = maps:map(fun(_, #target{db = Db} = T) -> - {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit), - {ok, Db2} = couch_db_engine:set_security(Db1, SecProps), - {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit), - T#target{db = Db3} - end, Targets), + Targets1 = maps:map( + fun(_, #target{db = Db} = T) -> + {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit), + {ok, Db2} = couch_db_engine:set_security(Db1, SecProps), + {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit), + T#target{db = Db3} + end, + Targets + ), State#state{targets = Targets1}. - copy_purge_info(#state{source_db = Db} = State) -> Seq = max(0, couch_db:get_oldest_purge_seq(Db) - 1), {ok, NewState} = couch_db:fold_purge_infos(Db, Seq, fun purge_cb/2, State), - Targets = maps:map(fun(_, #target{} = T) -> - commit_purge_infos(T) - end, NewState#state.targets), + Targets = maps:map( + fun(_, #target{} = T) -> + commit_purge_infos(T) + end, + NewState#state.targets + ), NewState#state{targets = Targets}. - -acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) -> +acc_and_flush(Item, #target{} = Target, MaxBuffer, FlushCb) -> #target{buffer = Buffer, buffer_size = BSize} = Target, BSize1 = BSize + ?term_size(Item), Target1 = Target#target{buffer = [Item | Buffer], buffer_size = BSize1}, @@ -318,37 +337,34 @@ acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) -> false -> Target1 end. - purge_cb({_PSeq, _UUID, Id, _Revs} = PI, #state{targets = Targets} = State) -> {Key, Target} = pick_target(Id, State, Targets), MaxBuffer = State#state.max_buffer_size, Target1 = acc_and_flush(PI, Target, MaxBuffer, fun commit_purge_infos/1), {ok, State#state{targets = Targets#{Key => Target1}}}. 
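
`acc_and_flush/4` above is an accumulate-then-flush buffer: each item is prepended, a running byte estimate is kept, and once it crosses the configured maximum the flush callback commits the batch. The same pattern reduced to plain maps and lists so it runs standalone (names are illustrative; `?term_size` is replaced by a caller-supplied size):

```
-module(buffer_sketch).
-export([new/2, add/3]).

%% State: buffered items (reversed), running size, max, flush callback.
new(Max, FlushFun) ->
    #{buf => [], size => 0, max => Max, flush => FlushFun}.

add(Item, ItemSize, #{buf := Buf, size := Size, max := Max, flush := Flush} = St) ->
    Size1 = Size + ItemSize,
    Buf1 = [Item | Buf],
    case Size1 >= Max of
        true ->
            %% Flush in insertion order, then reset the buffer.
            ok = Flush(lists:reverse(Buf1)),
            St#{buf := [], size := 0};
        false ->
            St#{buf := Buf1, size := Size1}
    end.
```

This is how `purge_cb/2` (and later `changes_cb/2`) drive `acc_and_flush` from a fold, passing `commit_purge_infos/1` or `commit_docs/1` as the callback.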
- commit_purge_infos(#target{buffer = [], db = Db} = Target) -> Target#target{db = Db}; - commit_purge_infos(#target{buffer = PIs0, db = Db} = Target) -> PIs = lists:reverse(PIs0), {ok, Db1} = couch_db_engine:copy_purge_infos(Db, PIs), {ok, Db2} = couch_db_engine:commit_data(Db1), Target#target{buffer = [], buffer_size = 0, db = Db2}. - copy_docs(#state{source_db = Db} = State) -> {ok, NewState} = couch_db:fold_changes(Db, 0, fun changes_cb/2, State), - CommitTargets = maps:map(fun(_, #target{} = T) -> - commit_docs(T) - end, NewState#state.targets), + CommitTargets = maps:map( + fun(_, #target{} = T) -> + commit_docs(T) + end, + NewState#state.targets + ), NewState#state{targets = CommitTargets}. - % Backwards compatibility clause. Seq trees used to hold #doc_infos at one time changes_cb(#doc_info{id = Id}, #state{source_db = Db} = State) -> [FDI = #full_doc_info{}] = couch_db_engine:open_docs(Db, [Id]), changes_cb(FDI, State); - changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) -> #state{source_db = SourceDb, targets = Targets} = State, {Key, Target} = pick_target(Id, State, Targets), @@ -357,17 +373,14 @@ changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) -> Target1 = acc_and_flush(FDI1, Target, MaxBuffer, fun commit_docs/1), {ok, State#state{targets = Targets#{Key => Target1}}}. - commit_docs(#target{buffer = [], db = Db} = Target) -> Target#target{db = Db}; - commit_docs(#target{buffer = FDIs, db = Db} = Target) -> Pairs = [{not_found, FDI} || FDI <- lists:reverse(FDIs)], {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, []), {ok, Db2} = couch_db_engine:commit_data(Db1), Target#target{buffer = [], buffer_size = 0, db = Db2}. - process_fdi(FDI, SourceDb, TargetDb) -> #full_doc_info{id = Id, rev_tree = RTree} = FDI, Acc = #racc{id = Id, source_db = SourceDb, target_db = TargetDb}, @@ -378,10 +391,8 @@ process_fdi(FDI, SourceDb, TargetDb) -> sizes = #size_info{active = Active, external = External} }. - revtree_cb(_Rev, _Leaf, branch, Acc) -> {[], Acc}; - revtree_cb({Pos, RevId}, Leaf, leaf, Acc) -> #racc{id = Id, source_db = SourceDb, target_db = TargetDb} = Acc, #leaf{deleted = Deleted, ptr = Ptr, sizes = LeafSizes} = Leaf, @@ -393,16 +404,20 @@ revtree_cb({Pos, RevId}, Leaf, leaf, Acc) -> }, Doc1 = couch_db_engine:read_doc_body(SourceDb, Doc0), #doc{body = Body, atts = AttInfos0} = Doc1, - External = case LeafSizes#size_info.external of - 0 when is_binary(Body) -> - couch_compress:uncompressed_size(Body); - 0 -> - couch_ejson_size:encoded_size(Body); - N -> N - end, - AttInfos = if not is_binary(AttInfos0) -> AttInfos0; true -> - couch_compress:decompress(AttInfos0) - end, + External = + case LeafSizes#size_info.external of + 0 when is_binary(Body) -> + couch_compress:uncompressed_size(Body); + 0 -> + couch_ejson_size:encoded_size(Body); + N -> + N + end, + AttInfos = + if + not is_binary(AttInfos0) -> AttInfos0; + true -> couch_compress:decompress(AttInfos0) + end, Atts = [process_attachment(Att, SourceDb, TargetDb) || Att <- AttInfos], Doc2 = Doc1#doc{atts = Atts}, Doc3 = couch_db_engine:serialize_doc(TargetDb, Doc2), @@ -417,42 +432,45 @@ revtree_cb({Pos, RevId}, Leaf, leaf, Acc) -> }, {NewLeaf, add_sizes(Active, External, AttSizes, Acc)}. 
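
In `revtree_cb/4` above, a stored external size of 0 marks an older file that never recorded one, so the size is recomputed from the body (via couch_compress for compressed bodies, couch_ejson_size otherwise). The decision itself is a small pure function; here it is in isolation, with the recompute path passed in as a fun since the couch modules are not reproduced:

```
-module(extsize_sketch).
-export([external_size/3]).

%% Stored size 0 means "unknown" in pre-upgrade files: recompute.
external_size(0, Body, RecomputeFun) ->
    RecomputeFun(Body);
%% Any positive stored size is trusted as-is.
external_size(N, _Body, _RecomputeFun) when is_integer(N), N > 0 ->
    N.
```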
- % This is copied almost verbatim from the compactor -process_attachment({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}, SourceDb, - TargetDb) -> +process_attachment( + {Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}, + SourceDb, + TargetDb +) -> % 010 upgrade code {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp), {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []), ok = couch_stream:copy(SrcStream, DstStream), {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} = - couch_stream:close(DstStream), + couch_stream:close(DstStream), {ok, NewBinSp} = couch_stream:to_disk_term(NewStream), couch_util:check_md5(ExpectedMd5, ActualMd5), {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity}; - -process_attachment({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, - Enc1}, SourceDb, TargetDb) -> +process_attachment( + {Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}, SourceDb, TargetDb +) -> {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp), {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []), ok = couch_stream:copy(SrcStream, DstStream), {NewStream, AttLen, _, ActualMd5, _IdentityMd5} = - couch_stream:close(DstStream), + couch_stream:close(DstStream), {ok, NewBinSp} = couch_stream:to_disk_term(NewStream), couch_util:check_md5(ExpectedMd5, ActualMd5), - Enc = case Enc1 of - true -> gzip; % 0110 upgrade code - false -> identity; % 0110 upgrade code - _ -> Enc1 - end, + Enc = + case Enc1 of + % 0110 upgrade code + true -> gzip; + % 0110 upgrade code + false -> identity; + _ -> Enc1 + end, {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}. - get_engine(Db) -> {ok, DbInfoProps} = couch_db:get_db_info(Db), proplists:get_value(engine, DbInfoProps). - add_sizes(Active, External, Atts, #racc{} = Acc) -> #racc{active = ActiveAcc, external = ExternalAcc, atts = AttsAcc} = Acc, NewActiveAcc = ActiveAcc + Active, @@ -464,41 +482,42 @@ add_sizes(Active, External, Atts, #racc{} = Acc) -> atts = NewAttsAcc }. - total_sizes(#racc{active = Active, external = External, atts = Atts}) -> TotalAtts = lists:foldl(fun({_, S}, A) -> S + A end, 0, Atts), {Active + TotalAtts, External + TotalAtts}. - get_max_buffer_size() -> config:get_integer("reshard", "split_buffer_size", ?DEFAULT_BUFFER_SIZE). 
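
The `Enc` case at the end of `process_attachment/3` is the `% 0110 upgrade code`: attachment encodings were once stored as booleans and are mapped to today's atoms at read time. The same mapping as a standalone helper (hypothetical module name):

```
-module(enc_sketch).
-export([upgrade_encoding/1]).

upgrade_encoding(true) -> gzip;         % historical "compressed" flag
upgrade_encoding(false) -> identity;    % historical "plain" flag
upgrade_encoding(Enc) when is_atom(Enc) -> Enc.  % already current form
```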
- copy_local_docs(#state{source_db = Db, targets = Targets} = State) -> FoldFun = fun(#doc{id = Id} = Doc, Acc) -> - UpdatedAcc = case Id of - <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> -> - Acc; - <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> -> - Acc; - <<?LOCAL_DOC_PREFIX, _/binary>> -> - % Users' and replicator app's checkpoints go to their - % respective shards based on the general hashing algorithm - {Key, Target} = pick_target(Id, State, Acc), - #target{buffer = Docs} = Target, - Acc#{Key => Target#target{buffer = [Doc | Docs]}} - end, + UpdatedAcc = + case Id of + <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> -> + Acc; + <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> -> + Acc; + <<?LOCAL_DOC_PREFIX, _/binary>> -> + % Users' and replicator app's checkpoints go to their + % respective shards based on the general hashing algorithm + {Key, Target} = pick_target(Id, State, Acc), + #target{buffer = Docs} = Target, + Acc#{Key => Target#target{buffer = [Doc | Docs]}} + end, {ok, UpdatedAcc} end, {ok, Targets1} = couch_db:fold_local_docs(Db, FoldFun, Targets, []), - Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) -> - case Docs of - [] -> - T; - [_ | _] -> - Docs1 = lists:reverse(Docs), - {ok, _} = couch_db:update_docs(TDb, Docs1), - T#target{buffer = []} - end - end, Targets1), + Targets2 = maps:map( + fun(_, #target{db = TDb, buffer = Docs} = T) -> + case Docs of + [] -> + T; + [_ | _] -> + Docs1 = lists:reverse(Docs), + {ok, _} = couch_db:update_docs(TDb, Docs1), + T#target{buffer = []} + end + end, + Targets1 + ), State#state{targets = Targets2}. diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl index 535acfad6..710b70510 100644 --- a/src/couch/src/couch_db_updater.erl +++ b/src/couch/src/couch_db_updater.erl @@ -15,14 +15,14 @@ -vsn(1). -export([add_sizes/3, upgrade_sizes/1]). --export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]). +-export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]). -include_lib("couch/include/couch_db.hrl"). -include("couch_db_int.hrl"). -define(IDLE_LIMIT_DEFAULT, 61000). --define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). % 10 GiB - +% 10 GiB +-define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). -record(merge_acc, { revs_limit, @@ -33,7 +33,6 @@ full_partitions = [] }). - init({Engine, DbName, FilePath, Options0}) -> erlang:put(io_priority, {db_update, DbName}), update_idle_limit_from_config(), @@ -59,7 +58,6 @@ init({Engine, DbName, FilePath, Options0}) -> proc_lib:init_ack(InitError) end. 
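
`init/1` above acknowledges its starter explicitly through `proc_lib:init_ack`, which is what lets it do real setup work before the caller's start returns. The handshake in miniature, with a trivial loop standing in for the updater:

```
-module(ack_sketch).
-export([start_link/0, init/1]).

start_link() ->
    %% Synchronous start: returns whatever init/1 passes to init_ack.
    proc_lib:start_link(?MODULE, init, [self()]).

init(Parent) ->
    %% ... expensive setup would happen here ...
    proc_lib:init_ack(Parent, {ok, self()}),
    loop().

loop() ->
    receive
        stop -> ok
    end.
```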
- terminate(Reason, Db) -> couch_util:shutdown_sync(Db#db.compactor_pid), couch_db_engine:terminate(Reason, Db), @@ -81,7 +79,6 @@ handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) -> Db2 = Db#db{compactor_pid = nil}, ok = couch_server:db_updated(Db2), {reply, ok, Db2, idle_limit()}; - handle_call({set_security, NewSec}, _From, #db{} = Db) -> {ok, NewDb} = couch_db_engine:set_security(Db, NewSec), NewSecDb = commit_data(NewDb#db{ @@ -89,36 +86,38 @@ handle_call({set_security, NewSec}, _From, #db{} = Db) -> }), ok = couch_server:db_updated(NewSecDb), {reply, ok, NewSecDb, idle_limit()}; - handle_call({set_revs_limit, Limit}, _From, Db) -> {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit), Db3 = commit_data(Db2), ok = couch_server:db_updated(Db3), {reply, ok, Db3, idle_limit()}; - handle_call({set_purge_infos_limit, Limit}, _From, Db) -> {ok, Db2} = couch_db_engine:set_purge_infos_limit(Db, Limit), ok = couch_server:db_updated(Db2), {reply, ok, Db2, idle_limit()}; - handle_call({purge_docs, [], _}, _From, Db) -> {reply, {ok, []}, Db, idle_limit()}; - handle_call({purge_docs, PurgeReqs0, Options}, _From, Db) -> % Filter out any previously applied updates during % internal replication IsRepl = lists:member(replicated_changes, Options), - PurgeReqs = if not IsRepl -> PurgeReqs0; true -> - UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0], - PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs), - lists:flatmap(fun - ({not_found, PReq}) -> [PReq]; - ({{_, _, _, _}, _}) -> [] - end, lists:zip(PurgeInfos, PurgeReqs0)) - end, + PurgeReqs = + if + not IsRepl -> + PurgeReqs0; + true -> + UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0], + PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs), + lists:flatmap( + fun + ({not_found, PReq}) -> [PReq]; + ({{_, _, _, _}, _}) -> [] + end, + lists:zip(PurgeInfos, PurgeReqs0) + ) + end, {ok, NewDb, Replies} = purge_docs(Db, PurgeReqs), {reply, {ok, Replies}, NewDb, idle_limit()}; - handle_call(Msg, From, Db) -> case couch_db_engine:handle_db_updater_call(Msg, From, Db) of {reply, Resp, NewDb} -> @@ -127,7 +126,6 @@ handle_call(Msg, From, Db) -> Else end. - handle_cast({load_validation_funs, ValidationFuns}, Db) -> Db2 = Db#db{validate_doc_funs = ValidationFuns}, ok = couch_server:db_updated(Db2), @@ -152,65 +150,76 @@ handle_cast(start_compact, Db) -> handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) -> {ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo), {noreply, NewDb}; - handle_cast(wakeup, Db) -> {noreply, Db, idle_limit()}; - handle_cast(Msg, #db{name = Name} = Db) -> - couch_log:error("Database `~s` updater received unexpected cast: ~p", - [Name, Msg]), + couch_log:error( + "Database `~s` updater received unexpected cast: ~p", + [Name, Msg] + ), {stop, Msg, Db}. 
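
The purge filter in `handle_call({purge_docs, ...})` above also shows erlfmt's layout for multi-clause anonymous funs: `fun` alone on its line, one indented clause per pattern. Reduced to toy data:

```
-module(fun_style).
-export([keep_missing/1]).

%% Keep only the requests whose lookup came back not_found.
keep_missing(Pairs) ->
    lists:flatmap(
        fun
            ({not_found, Req}) -> [Req];
            ({_Found, _Req}) -> []
        end,
        Pairs
    ).
```

For example, `keep_missing([{not_found, a}, {{1, 2, 3, 4}, b}])` returns `[a]`.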
- -handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts}, - Db) -> +handle_info( + {update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts}, + Db +) -> GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs), - if NonRepDocs == [] -> - {GroupedDocs3, Clients} = collect_updates(GroupedDocs2, - [Client], MergeConflicts); - true -> - GroupedDocs3 = GroupedDocs2, - Clients = [Client] + if + NonRepDocs == [] -> + {GroupedDocs3, Clients} = collect_updates( + GroupedDocs2, + [Client], + MergeConflicts + ); + true -> + GroupedDocs3 = GroupedDocs2, + Clients = [Client] end, NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs], try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts) of - {ok, Db2, UpdatedDDocIds} -> - ok = couch_server:db_updated(Db2), - case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of - {Seq, Seq} -> ok; - _ -> couch_event:notify(Db2#db.name, updated) - end, - if NonRepDocs2 /= [] -> - couch_event:notify(Db2#db.name, local_updated); - true -> ok - end, - [catch(ClientPid ! {done, self()}) || ClientPid <- Clients], - Db3 = case length(UpdatedDDocIds) > 0 of - true -> - % Ken and ddoc_cache are the only things that - % use the unspecified ddoc_updated message. We - % should update them to use the new message per - % ddoc. - lists:foreach(fun(DDocId) -> - couch_event:notify(Db2#db.name, {ddoc_updated, DDocId}) - end, UpdatedDDocIds), - couch_event:notify(Db2#db.name, ddoc_updated), - ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds), - refresh_validate_doc_funs(Db2); - false -> - Db2 - end, - {noreply, Db3, hibernate_if_no_idle_limit()} + {ok, Db2, UpdatedDDocIds} -> + ok = couch_server:db_updated(Db2), + case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of + {Seq, Seq} -> ok; + _ -> couch_event:notify(Db2#db.name, updated) + end, + if + NonRepDocs2 /= [] -> + couch_event:notify(Db2#db.name, local_updated); + true -> + ok + end, + [catch (ClientPid ! {done, self()}) || ClientPid <- Clients], + Db3 = + case length(UpdatedDDocIds) > 0 of + true -> + % Ken and ddoc_cache are the only things that + % use the unspecified ddoc_updated message. We + % should update them to use the new message per + % ddoc. + lists:foreach( + fun(DDocId) -> + couch_event:notify(Db2#db.name, {ddoc_updated, DDocId}) + end, + UpdatedDDocIds + ), + couch_event:notify(Db2#db.name, ddoc_updated), + ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds), + refresh_validate_doc_funs(Db2); + false -> + Db2 + end, + {noreply, Db3, hibernate_if_no_idle_limit()} catch - throw: retry -> - [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients], + throw:retry -> + [catch (ClientPid ! {retry, self()}) || ClientPid <- Clients], {noreply, Db, hibernate_if_no_idle_limit()} end; handle_info({'EXIT', _Pid, normal}, Db) -> {noreply, Db, idle_limit()}; handle_info({'EXIT', _Pid, Reason}, Db) -> {stop, Reason, Db}; -handle_info(timeout, #db{name=DbName} = Db) -> +handle_info(timeout, #db{name = DbName} = Db) -> IdleLimitMSec = update_idle_limit_from_config(), case couch_db:is_idle(Db) of true -> @@ -230,7 +239,6 @@ handle_info(timeout, #db{name=DbName} = Db) -> % force a thorough garbage collection. gen_server:cast(self(), wakeup), {noreply, Db, hibernate}; - handle_info(Msg, Db) -> case couch_db_engine:handle_db_updater_info(Msg, Db) of {noreply, NewDb} -> @@ -239,7 +247,6 @@ handle_info(Msg, Db) -> Else end. - code_change(_OldVsn, State, _Extra) -> {ok, State}. 
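
Every reply and noreply in the updater carries `idle_limit()` as the gen_server timeout, so a `timeout` info message means the process sat idle for that long and may hibernate. The idiom in a self-contained server (module name and timeout value are illustrative):

```
-module(idle_sketch).
-behaviour(gen_server).
-export([start_link/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2]).

-define(IDLE_MS, 61000).

start_link() ->
    gen_server:start_link(?MODULE, [], []).

init([]) ->
    {ok, #{}, ?IDLE_MS}.

handle_call(ping, _From, State) ->
    %% Re-arm the idle timer on every interaction.
    {reply, pong, State, ?IDLE_MS}.

handle_cast(_Msg, State) ->
    {noreply, State, ?IDLE_MS}.

handle_info(timeout, State) ->
    %% No traffic for ?IDLE_MS: shed memory until the next message.
    {noreply, State, hibernate};
handle_info(_Msg, State) ->
    {noreply, State, ?IDLE_MS}.
```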
@@ -248,25 +255,28 @@ sort_and_tag_grouped_docs(Client, GroupedDocs) -> % The merge_updates function will fail and the database can end up with % duplicate documents if the incoming groups are not sorted, so as a sanity % check we sort them again here. See COUCHDB-2735. - Cmp = fun([#doc{id=A}|_], [#doc{id=B}|_]) -> A < B end, - lists:map(fun(DocGroup) -> - [{Client, maybe_tag_doc(D)} || D <- DocGroup] - end, lists:sort(Cmp, GroupedDocs)). + Cmp = fun([#doc{id = A} | _], [#doc{id = B} | _]) -> A < B end, + lists:map( + fun(DocGroup) -> + [{Client, maybe_tag_doc(D)} || D <- DocGroup] + end, + lists:sort(Cmp, GroupedDocs) + ). -maybe_tag_doc(#doc{id=Id, revs={Pos,[_Rev|PrevRevs]}, meta=Meta0}=Doc) -> +maybe_tag_doc(#doc{id = Id, revs = {Pos, [_Rev | PrevRevs]}, meta = Meta0} = Doc) -> case lists:keymember(ref, 1, Meta0) of true -> Doc; false -> - Key = {Id, {Pos-1, PrevRevs}}, - Doc#doc{meta=[{ref, Key} | Meta0]} + Key = {Id, {Pos - 1, PrevRevs}}, + Doc#doc{meta = [{ref, Key} | Meta0]} end. -merge_updates([[{_,#doc{id=X}}|_]=A|RestA], [[{_,#doc{id=X}}|_]=B|RestB]) -> - [A++B | merge_updates(RestA, RestB)]; -merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X < Y -> +merge_updates([[{_, #doc{id = X}} | _] = A | RestA], [[{_, #doc{id = X}} | _] = B | RestB]) -> + [A ++ B | merge_updates(RestA, RestB)]; +merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X < Y -> [hd(A) | merge_updates(tl(A), B)]; -merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X > Y -> +merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X > Y -> [hd(B) | merge_updates(A, tl(B))]; merge_updates([], RestB) -> RestB; @@ -283,18 +293,24 @@ collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts) -> GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs), GroupedDocsAcc2 = merge_updates(GroupedDocsAcc, GroupedDocs2), - collect_updates(GroupedDocsAcc2, [Client | ClientsAcc], - MergeConflicts) + collect_updates( + GroupedDocsAcc2, + [Client | ClientsAcc], + MergeConflicts + ) after 0 -> {GroupedDocsAcc, ClientsAcc} end. - init_db(DbName, FilePath, EngineState, Options) -> % convert start time tuple to microsecs and store as a binary string {MegaSecs, Secs, MicroSecs} = os:timestamp(), - StartTime = ?l2b(io_lib:format("~p", - [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])), + StartTime = ?l2b( + io_lib:format( + "~p", + [(MegaSecs * 1000000 * 1000000) + (Secs * 1000000) + MicroSecs] + ) + ), BDU = couch_util:get_value(before_doc_update, Options, nil), ADR = couch_util:get_value(after_doc_read, Options, nil), @@ -319,31 +335,36 @@ init_db(DbName, FilePath, EngineState, Options) -> options = lists:keystore(props, 1, NonCreateOpts, {props, DbProps}) }. - refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) -> spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]), Db#db{validate_doc_funs = undefined}; refresh_validate_doc_funs(Db0) -> - Db = Db0#db{user_ctx=?ADMIN_USER}, + Db = Db0#db{user_ctx = ?ADMIN_USER}, {ok, DesignDocs} = couch_db:get_design_docs(Db), ProcessDocFuns = lists:flatmap( fun(DesignDocInfo) -> {ok, DesignDoc} = couch_db:open_doc_int( - Db, DesignDocInfo, [ejson_body]), + Db, DesignDocInfo, [ejson_body] + ), case couch_doc:get_validate_doc_fun(DesignDoc) of - nil -> []; - Fun -> [Fun] + nil -> []; + Fun -> [Fun] end - end, DesignDocs), - Db#db{validate_doc_funs=ProcessDocFuns}. + end, + DesignDocs + ), + Db#db{validate_doc_funs = ProcessDocFuns}. 
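
`merge_updates/2` above is a plain merge of two id-sorted lists of update groups, concatenating groups that target the same document (which is why `sort_and_tag_grouped_docs/2` re-sorts first; see COUCHDB-2735). The recursion with the `#doc{}` plumbing stripped away, on `{Key, Group}` pairs:

```
-module(merge_sketch).
-export([merge/2]).

%% Same key on both sides: concatenate the groups.
merge([{K, A} | RestA], [{K, B} | RestB]) ->
    [{K, A ++ B} | merge(RestA, RestB)];
merge([{KA, _} | _] = A, [{KB, _} | _] = B) when KA < KB ->
    [hd(A) | merge(tl(A), B)];
merge([{KA, _} | _] = A, [{KB, _} | _] = B) when KA > KB ->
    [hd(B) | merge(A, tl(B))];
merge([], RestB) ->
    RestB;
merge(RestA, []) ->
    RestA.
```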
% rev tree functions flush_trees(_Db, [], AccFlushedTrees) -> {ok, lists:reverse(AccFlushedTrees)}; -flush_trees(#db{} = Db, - [InfoUnflushed | RestUnflushed], AccFlushed) -> - #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed, +flush_trees( + #db{} = Db, + [InfoUnflushed | RestUnflushed], + AccFlushed +) -> + #full_doc_info{update_seq = UpdateSeq, rev_tree = Unflushed} = InfoUnflushed, {Flushed, FinalAcc} = couch_key_tree:mapfold( fun(_Rev, Value, Type, SizesAcc) -> case Value of @@ -353,9 +374,9 @@ flush_trees(#db{} = Db, check_doc_atts(Db, Doc), ExternalSize = get_meta_body_size(Value#doc.meta), {size_info, AttSizeInfo} = - lists:keyfind(size_info, 1, Doc#doc.meta), + lists:keyfind(size_info, 1, Doc#doc.meta), {ok, NewDoc, WrittenSize} = - couch_db_engine:write_doc_body(Db, Doc), + couch_db_engine:write_doc_body(Db, Doc), Leaf = #leaf{ deleted = Doc#doc.deleted, ptr = NewDoc#doc.body, @@ -372,7 +393,10 @@ flush_trees(#db{} = Db, _ -> {Value, SizesAcc} end - end, {0, 0, []}, Unflushed), + end, + {0, 0, []}, + Unflushed + ), {FinalAS, FinalES, FinalAtts} = FinalAcc, TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts), NewInfo = InfoUnflushed#full_doc_info{ @@ -384,30 +408,34 @@ flush_trees(#db{} = Db, }, flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]). - check_doc_atts(Db, Doc) -> {atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta), % Make sure that the attachments were written to the currently % active attachment stream. If compaction swaps during a write % request we may have to rewrite our attachment bodies. - if Stream == nil -> ok; true -> - case couch_db:is_active_stream(Db, Stream) of - true -> - ok; - false -> - % Stream where the attachments were written to is - % no longer the current attachment stream. This - % can happen when a database is switched at - % compaction time. - couch_log:debug("Stream where the attachments were" - " written has changed." - " Possibly retrying.", []), - throw(retry) - end + if + Stream == nil -> + ok; + true -> + case couch_db:is_active_stream(Db, Stream) of + true -> + ok; + false -> + % Stream where the attachments were written to is + % no longer the current attachment stream. This + % can happen when a database is switched at + % compaction time. + couch_log:debug( + "Stream where the attachments were" + " written has changed." + " Possibly retrying.", + [] + ), + throw(retry) + end end. - -add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) -> +add_sizes(Type, #leaf{sizes = Sizes, atts = AttSizes}, Acc) -> % Maybe upgrade from disk_size only #size_info{ active = ActiveSize, @@ -415,24 +443,27 @@ add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) -> } = upgrade_sizes(Sizes), {ASAcc, ESAcc, AttsAcc} = Acc, NewASAcc = ActiveSize + ASAcc, - NewESAcc = ESAcc + if Type == leaf -> ExternalSize; true -> 0 end, + NewESAcc = + ESAcc + + if + Type == leaf -> ExternalSize; + true -> 0 + end, NewAttsAcc = lists:umerge(AttSizes, AttsAcc), {NewASAcc, NewESAcc, NewAttsAcc}. - -upgrade_sizes(#size_info{}=SI) -> +upgrade_sizes(#size_info{} = SI) -> SI; upgrade_sizes({D, E}) -> - #size_info{active=D, external=E}; + #size_info{active = D, external = E}; upgrade_sizes(S) when is_integer(S) -> - #size_info{active=S, external=0}. - + #size_info{active = S, external = 0}. send_result(Client, Doc, NewResult) -> % used to send a result to the client - catch(Client ! {result, self(), {doc_tag(Doc), NewResult}}). + catch (Client ! {result, self(), {doc_tag(Doc), NewResult}}). 
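
`upgrade_sizes/1` above is a lazy on-disk upgrade: size terms written by older releases (a bare integer, later a two-tuple) are normalized to the current `#size_info{}` record at read time, so callers such as `add_sizes/3` only ever see one shape. The same idea with a map standing in for the record so it runs without the couch headers:

```
-module(sizes_sketch).
-export([upgrade/1]).

upgrade(#{active := _, external := _} = SI) ->
    %% Already the current shape.
    SI;
upgrade({Active, External}) ->
    %% Older two-element form.
    #{active => Active, external => External};
upgrade(S) when is_integer(S) ->
    %% Oldest form: a single disk size.
    #{active => S, external => 0}.
```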
-doc_tag(#doc{meta=Meta}) -> +doc_tag(#doc{meta = Meta}) -> case lists:keyfind(ref, 1, Meta) of {ref, Ref} -> Ref; false -> throw(no_doc_tag); @@ -452,17 +483,21 @@ merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) -> % Track doc ids so we can debug large revision trees erlang:put(last_id_merged, OldDocInfo#full_doc_info.id), - NewDocInfo0 = lists:foldl(fun({Client, NewDoc}, OldInfoAcc) -> - NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts), - case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of - true when not MergeConflicts -> - DocId = NewInfo#full_doc_info.id, - send_result(Client, NewDoc, {partition_overflow, DocId}), - OldInfoAcc; - _ -> - NewInfo - end - end, OldDocInfo, NewDocs), + NewDocInfo0 = lists:foldl( + fun({Client, NewDoc}, OldInfoAcc) -> + NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts), + case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of + true when not MergeConflicts -> + DocId = NewInfo#full_doc_info.id, + send_result(Client, NewDoc, {partition_overflow, DocId}), + OldInfoAcc; + _ -> + NewInfo + end + end, + OldDocInfo, + NewDocs + ), NewDocInfo1 = maybe_stem_full_doc_info(NewDocInfo0, Limit), % When MergeConflicts is false, we updated #full_doc_info.deleted on every % iteration of merge_rev_tree. However, merge_rev_tree does not update @@ -470,39 +505,43 @@ merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) -> % to know whether the doc is deleted between iterations. Since we still % need to know if the doc is deleted after the merge happens, we have to % set it here. - NewDocInfo2 = case MergeConflicts of - true -> - NewDocInfo1#full_doc_info{ - deleted = couch_doc:is_deleted(NewDocInfo1) - }; - false -> - NewDocInfo1 - end, - if NewDocInfo2 == OldDocInfo -> - % nothing changed - merge_rev_trees(RestDocsList, RestOldInfo, Acc); - true -> - % We have updated the document, give it a new update_seq. Its - % important to note that the update_seq on OldDocInfo should - % be identical to the value on NewDocInfo1. - OldSeq = OldDocInfo#full_doc_info.update_seq, - NewDocInfo3 = NewDocInfo2#full_doc_info{ - update_seq = Acc#merge_acc.cur_seq + 1 - }, - RemoveSeqs = case OldSeq of - 0 -> Acc#merge_acc.rem_seqs; - _ -> [OldSeq | Acc#merge_acc.rem_seqs] + NewDocInfo2 = + case MergeConflicts of + true -> + NewDocInfo1#full_doc_info{ + deleted = couch_doc:is_deleted(NewDocInfo1) + }; + false -> + NewDocInfo1 end, - NewAcc = Acc#merge_acc{ - add_infos = [NewDocInfo3 | Acc#merge_acc.add_infos], - rem_seqs = RemoveSeqs, - cur_seq = Acc#merge_acc.cur_seq + 1 - }, - merge_rev_trees(RestDocsList, RestOldInfo, NewAcc) + if + NewDocInfo2 == OldDocInfo -> + % nothing changed + merge_rev_trees(RestDocsList, RestOldInfo, Acc); + true -> + % We have updated the document, give it a new update_seq. Its + % important to note that the update_seq on OldDocInfo should + % be identical to the value on NewDocInfo1. + OldSeq = OldDocInfo#full_doc_info.update_seq, + NewDocInfo3 = NewDocInfo2#full_doc_info{ + update_seq = Acc#merge_acc.cur_seq + 1 + }, + RemoveSeqs = + case OldSeq of + 0 -> Acc#merge_acc.rem_seqs; + _ -> [OldSeq | Acc#merge_acc.rem_seqs] + end, + NewAcc = Acc#merge_acc{ + add_infos = [NewDocInfo3 | Acc#merge_acc.add_infos], + rem_seqs = RemoveSeqs, + cur_seq = Acc#merge_acc.cur_seq + 1 + }, + merge_rev_trees(RestDocsList, RestOldInfo, NewAcc) end. 
-merge_rev_tree(OldInfo, NewDoc, Client, false) - when OldInfo#full_doc_info.deleted -> +merge_rev_tree(OldInfo, NewDoc, Client, false) when + OldInfo#full_doc_info.deleted +-> % We're recreating a document that was previously % deleted. To check that this is a recreation from % the root we assert that the new document has a @@ -517,28 +556,29 @@ merge_rev_tree(OldInfo, NewDoc, Client, false) case RevDepth == 1 andalso not NewDeleted of true -> % Update the new doc based on revisions in OldInfo - #doc_info{revs=[WinningRev | _]} = couch_doc:to_doc_info(OldInfo), - #rev_info{rev={OldPos, OldRev}} = WinningRev, - Body = case couch_util:get_value(comp_body, NewDoc#doc.meta) of - CompBody when is_binary(CompBody) -> - couch_compress:decompress(CompBody); - _ -> - NewDoc#doc.body - end, + #doc_info{revs = [WinningRev | _]} = couch_doc:to_doc_info(OldInfo), + #rev_info{rev = {OldPos, OldRev}} = WinningRev, + Body = + case couch_util:get_value(comp_body, NewDoc#doc.meta) of + CompBody when is_binary(CompBody) -> + couch_compress:decompress(CompBody); + _ -> + NewDoc#doc.body + end, RevIdDoc = NewDoc#doc{ revs = {OldPos, [OldRev]}, body = Body }, NewRevId = couch_db:new_revid(RevIdDoc), - NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}}, + NewDoc2 = NewDoc#doc{revs = {OldPos + 1, [NewRevId, OldRev]}}, % Merge our modified new doc into the tree - #full_doc_info{rev_tree=OldTree} = OldInfo, + #full_doc_info{rev_tree = OldTree} = OldInfo, NewTree0 = couch_doc:to_path(NewDoc2), case couch_key_tree:merge(OldTree, NewTree0) of {NewTree1, new_leaf} -> % We changed the revision id so inform the caller - send_result(Client, NewDoc, {ok, {OldPos+1, NewRevId}}), + send_result(Client, NewDoc, {ok, {OldPos + 1, NewRevId}}), OldInfo#full_doc_info{ rev_tree = NewTree1, deleted = false @@ -615,34 +655,44 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) -> UpdateSeq = couch_db_engine:get_update_seq(Db), RevsLimit = couch_db_engine:get_revs_limit(Db), - Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList], + Ids = [Id || [{_Client, #doc{id = Id}} | _] <- DocsList], % lookup up the old documents, if they exist. OldDocLookups = couch_db_engine:open_docs(Db, Ids), - OldDocInfos = lists:zipwith(fun - (_Id, #full_doc_info{} = FDI) -> - FDI; - (Id, not_found) -> - #full_doc_info{id=Id} - end, Ids, OldDocLookups), + OldDocInfos = lists:zipwith( + fun + (_Id, #full_doc_info{} = FDI) -> + FDI; + (Id, not_found) -> + #full_doc_info{id = Id} + end, + Ids, + OldDocLookups + ), %% Get the list of full partitions - FullPartitions = case couch_db:is_partitioned(Db) of - true -> - case max_partition_size() of - N when N =< 0 -> - []; - Max -> - Partitions = lists:usort(lists:flatmap(fun(Id) -> - case couch_partition:extract(Id) of - undefined -> []; - {Partition, _} -> [Partition] - end - end, Ids)), - [P || P <- Partitions, partition_size(Db, P) >= Max] - end; - false -> - [] - end, + FullPartitions = + case couch_db:is_partitioned(Db) of + true -> + case max_partition_size() of + N when N =< 0 -> + []; + Max -> + Partitions = lists:usort( + lists:flatmap( + fun(Id) -> + case couch_partition:extract(Id) of + undefined -> []; + {Partition, _} -> [Partition] + end + end, + Ids + ) + ), + [P || P <- Partitions, partition_size(Db, P) >= Max] + end; + false -> + [] + end, % Merge the new docs into the revision trees. 
AccIn = #merge_acc{ @@ -668,8 +718,10 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) -> {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2), WriteCount = length(IndexFDIs), - couch_stats:increment_counter([couchdb, document_inserts], - WriteCount - length(RemSeqs)), + couch_stats:increment_counter( + [couchdb, document_inserts], + WriteCount - length(RemSeqs) + ), couch_stats:increment_counter([couchdb, document_writes], WriteCount), couch_stats:increment_counter( [couchdb, local_document_writes], @@ -678,26 +730,31 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) -> % Check if we just updated any design documents, and update the validation % funs if we did. - UpdatedDDocIds = lists:flatmap(fun - (<<"_design/", _/binary>> = Id) -> [Id]; - (_) -> [] - end, Ids), + UpdatedDDocIds = lists:flatmap( + fun + (<<"_design/", _/binary>> = Id) -> [Id]; + (_) -> [] + end, + Ids + ), {ok, commit_data(Db1), UpdatedDDocIds}. - update_local_doc_revs(Docs) -> - lists:foldl(fun({Client, Doc}, Acc) -> - case increment_local_doc_revs(Doc) of - {ok, #doc{revs = {0, [NewRev]}} = NewDoc} -> - send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}), - [NewDoc | Acc]; - {error, Error} -> - send_result(Client, Doc, {error, Error}), - Acc - end - end, [], Docs). - + lists:foldl( + fun({Client, Doc}, Acc) -> + case increment_local_doc_revs(Doc) of + {ok, #doc{revs = {0, [NewRev]}} = NewDoc} -> + send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}), + [NewDoc | Acc]; + {error, Error} -> + send_result(Client, Doc, {error, Error}), + Acc + end + end, + [], + Docs + ). increment_local_doc_revs(#doc{deleted = true} = Doc) -> {ok, Doc#doc{revs = {0, [0]}}}; @@ -707,15 +764,19 @@ increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) -> try PrevRev = binary_to_integer(RevStr), {ok, Doc#doc{revs = {0, [PrevRev + 1]}}} - catch error:badarg -> - {error, <<"Invalid rev format">>} + catch + error:badarg -> + {error, <<"Invalid rev format">>} end; increment_local_doc_revs(#doc{}) -> {error, <<"Invalid rev format">>}. max_partition_size() -> - config:get_integer("couchdb", "max_partition_size", - ?DEFAULT_MAX_PARTITION_SIZE). + config:get_integer( + "couchdb", + "max_partition_size", + ?DEFAULT_MAX_PARTITION_SIZE + ). 
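
`increment_local_doc_revs/1` below relies on `_local` revisions being integers encoded as binaries: bumping one is parse, add one, re-wrap, with `badarg` from the parse mapped to the user-facing error. The core of it as a hypothetical helper:

```
-module(localrev_sketch).
-export([bump/1]).

bump(RevBin) when is_binary(RevBin) ->
    try
        {ok, binary_to_integer(RevBin) + 1}
    catch
        error:badarg ->
            {error, <<"Invalid rev format">>}
    end.
```

So `bump(<<"1">>)` yields `{ok, 2}`, while `bump(<<"abc">>)` yields `{error, <<"Invalid rev format">>}`.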
partition_size(Db, Partition) -> {ok, Info} = couch_db:get_partition_info(Db, Partition), @@ -750,7 +811,6 @@ estimate_size(#full_doc_info{} = FDI) -> purge_docs(Db, []) -> {ok, Db, []}; - purge_docs(Db, PurgeReqs) -> Ids = lists:usort(lists:map(fun({_UUID, Id, _Revs}) -> Id end, PurgeReqs)), FDIs = couch_db_engine:open_docs(Db, Ids), @@ -759,23 +819,30 @@ purge_docs(Db, PurgeReqs) -> IdFDIs = lists:zip(Ids, FDIs), {NewIdFDIs, Replies} = apply_purge_reqs(PurgeReqs, IdFDIs, USeq, []), - Pairs = lists:flatmap(fun({DocId, OldFDI}) -> - {DocId, NewFDI} = lists:keyfind(DocId, 1, NewIdFDIs), - case {OldFDI, NewFDI} of - {not_found, not_found} -> - []; - {#full_doc_info{} = A, #full_doc_info{} = A} -> - []; - {#full_doc_info{}, _} -> - [{OldFDI, NewFDI}] - end - end, IdFDIs), + Pairs = lists:flatmap( + fun({DocId, OldFDI}) -> + {DocId, NewFDI} = lists:keyfind(DocId, 1, NewIdFDIs), + case {OldFDI, NewFDI} of + {not_found, not_found} -> + []; + {#full_doc_info{} = A, #full_doc_info{} = A} -> + []; + {#full_doc_info{}, _} -> + [{OldFDI, NewFDI}] + end + end, + IdFDIs + ), PSeq = couch_db_engine:get_purge_seq(Db), - {RevPInfos, _} = lists:foldl(fun({UUID, DocId, Revs}, {PIAcc, PSeqAcc}) -> - Info = {PSeqAcc + 1, UUID, DocId, Revs}, - {[Info | PIAcc], PSeqAcc + 1} - end, {[], PSeq}, PurgeReqs), + {RevPInfos, _} = lists:foldl( + fun({UUID, DocId, Revs}, {PIAcc, PSeqAcc}) -> + Info = {PSeqAcc + 1, UUID, DocId, Revs}, + {[Info | PIAcc], PSeqAcc + 1} + end, + {[], PSeq}, + PurgeReqs + ), PInfos = lists:reverse(RevPInfos), {ok, Db1} = couch_db_engine:purge_docs(Db, Pairs, PInfos), @@ -784,85 +851,90 @@ purge_docs(Db, PurgeReqs) -> couch_event:notify(Db2#db.name, updated), {ok, Db2, Replies}. - apply_purge_reqs([], IdFDIs, _USeq, Replies) -> {IdFDIs, lists:reverse(Replies)}; - apply_purge_reqs([Req | RestReqs], IdFDIs, USeq, Replies) -> {_UUID, DocId, Revs} = Req, {value, {_, FDI0}, RestIdFDIs} = lists:keytake(DocId, 1, IdFDIs), - {NewFDI, RemovedRevs, NewUSeq} = case FDI0 of - #full_doc_info{rev_tree = Tree} -> - case couch_key_tree:remove_leafs(Tree, Revs) of - {_, []} -> - % No change - {FDI0, [], USeq}; - {[], Removed} -> - % Completely purged - {not_found, Removed, USeq}; - {NewTree, Removed} -> - % Its possible to purge the #leaf{} that contains - % the update_seq where this doc sits in the - % update_seq sequence. Rather than do a bunch of - % complicated checks we just re-label every #leaf{} - % and reinsert it into the update_seq sequence. - {NewTree2, NewUpdateSeq} = couch_key_tree:mapfold(fun - (_RevId, Leaf, leaf, SeqAcc) -> - {Leaf#leaf{seq = SeqAcc + 1}, - SeqAcc + 1}; - (_RevId, Value, _Type, SeqAcc) -> - {Value, SeqAcc} - end, USeq, NewTree), - - FDI1 = FDI0#full_doc_info{ - update_seq = NewUpdateSeq, - rev_tree = NewTree2 - }, - {FDI1, Removed, NewUpdateSeq} - end; - not_found -> - % Not found means nothing to change - {not_found, [], USeq} - end, + {NewFDI, RemovedRevs, NewUSeq} = + case FDI0 of + #full_doc_info{rev_tree = Tree} -> + case couch_key_tree:remove_leafs(Tree, Revs) of + {_, []} -> + % No change + {FDI0, [], USeq}; + {[], Removed} -> + % Completely purged + {not_found, Removed, USeq}; + {NewTree, Removed} -> + % Its possible to purge the #leaf{} that contains + % the update_seq where this doc sits in the + % update_seq sequence. Rather than do a bunch of + % complicated checks we just re-label every #leaf{} + % and reinsert it into the update_seq sequence. 
+ {NewTree2, NewUpdateSeq} = couch_key_tree:mapfold( + fun + (_RevId, Leaf, leaf, SeqAcc) -> + {Leaf#leaf{seq = SeqAcc + 1}, SeqAcc + 1}; + (_RevId, Value, _Type, SeqAcc) -> + {Value, SeqAcc} + end, + USeq, + NewTree + ), + + FDI1 = FDI0#full_doc_info{ + update_seq = NewUpdateSeq, + rev_tree = NewTree2 + }, + {FDI1, Removed, NewUpdateSeq} + end; + not_found -> + % Not found means nothing to change + {not_found, [], USeq} + end, NewIdFDIs = [{DocId, NewFDI} | RestIdFDIs], NewReplies = [{ok, RemovedRevs} | Replies], apply_purge_reqs(RestReqs, NewIdFDIs, NewUSeq, NewReplies). - commit_data(Db) -> {ok, Db1} = couch_db_engine:commit_data(Db), Db1#db{ committed_update_seq = couch_db_engine:get_update_seq(Db) }. - pair_write_info(Old, New) -> - lists:map(fun(FDI) -> - case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of - #full_doc_info{} = OldFDI -> {OldFDI, FDI}; - false -> {not_found, FDI} - end - end, New). - + lists:map( + fun(FDI) -> + case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of + #full_doc_info{} = OldFDI -> {OldFDI, FDI}; + false -> {not_found, FDI} + end + end, + New + ). get_meta_body_size(Meta) -> {ejson_size, ExternalSize} = lists:keyfind(ejson_size, 1, Meta), ExternalSize. - default_security_object(<<"shards/", _/binary>>) -> case config:get("couchdb", "default_security", "admin_only") of "admin_only" -> - [{<<"members">>,{[{<<"roles">>,[<<"_admin">>]}]}}, - {<<"admins">>,{[{<<"roles">>,[<<"_admin">>]}]}}]; + [ + {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}}, + {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}} + ]; Everyone when Everyone == "everyone"; Everyone == "admin_local" -> [] end; default_security_object(_DbName) -> case config:get("couchdb", "default_security", "admin_only") of Admin when Admin == "admin_only"; Admin == "admin_local" -> - [{<<"members">>,{[{<<"roles">>,[<<"_admin">>]}]}}, - {<<"admins">>,{[{<<"roles">>,[<<"_admin">>]}]}}]; + [ + {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}}, + {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}} + ]; "everyone" -> [] end. @@ -873,12 +945,13 @@ default_security_object(_DbName) -> % Storage Engine) code lands this should be moved to the #db{} record. update_idle_limit_from_config() -> Default = integer_to_list(?IDLE_LIMIT_DEFAULT), - IdleLimit = case config:get("couchdb", "idle_check_timeout", Default) of - "infinity" -> - infinity; - Milliseconds -> - list_to_integer(Milliseconds) - end, + IdleLimit = + case config:get("couchdb", "idle_check_timeout", Default) of + "infinity" -> + infinity; + Milliseconds -> + list_to_integer(Milliseconds) + end, put(idle_limit, IdleLimit), IdleLimit. @@ -893,11 +966,9 @@ hibernate_if_no_idle_limit() -> Timeout end. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). - update_local_doc_revs_test_() -> {inparallel, [ {"Test local doc with valid rev", fun t_good_local_doc/0}, @@ -905,7 +976,6 @@ update_local_doc_revs_test_() -> {"Test deleted local doc", fun t_dead_local_doc/0} ]}. - t_good_local_doc() -> Doc = #doc{ id = <<"_local/alice">>, @@ -915,23 +985,23 @@ t_good_local_doc() -> [NewDoc] = update_local_doc_revs([{self(), Doc}]), ?assertEqual({0, [2]}, NewDoc#doc.revs), {ok, Result} = receive_result(Doc), - ?assertEqual({ok,{0,<<"2">>}}, Result). - + ?assertEqual({ok, {0, <<"2">>}}, Result). 
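
`default_security_object/1` above maps the three accepted values of `[couchdb] default_security` to a security object: for regular databases, `admin_only` and `admin_local` both yield the admin-only object and `everyone` yields an empty one, while shard databases treat `admin_local` like `everyone`. The non-shard mapping as a pure function:

```
-module(security_sketch).
-export([default_security/1]).

admins_only() ->
    [
        {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}},
        {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}}
    ].

%% Mapping for regular (non-shard) database names only.
default_security("admin_only") -> admins_only();
default_security("admin_local") -> admins_only();
default_security("everyone") -> [].
```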
t_bad_local_doc() -> - lists:foreach(fun(BadRevs) -> - Doc = #doc{ - id = <<"_local/alice">>, - revs = BadRevs, - meta = [{ref, make_ref()}] - }, - NewDocs = update_local_doc_revs([{self(), Doc}]), - ?assertEqual([], NewDocs), - {ok, Result} = receive_result(Doc), - ?assertEqual({error,<<"Invalid rev format">>}, Result) - end, [{0, [<<"a">>]}, {1, [<<"1">>]}]). - - + lists:foreach( + fun(BadRevs) -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = BadRevs, + meta = [{ref, make_ref()}] + }, + NewDocs = update_local_doc_revs([{self(), Doc}]), + ?assertEqual([], NewDocs), + {ok, Result} = receive_result(Doc), + ?assertEqual({error, <<"Invalid rev format">>}, Result) + end, + [{0, [<<"a">>]}, {1, [<<"1">>]}] + ). t_dead_local_doc() -> Doc = #doc{ @@ -943,8 +1013,7 @@ t_dead_local_doc() -> [NewDoc] = update_local_doc_revs([{self(), Doc}]), ?assertEqual({0, [0]}, NewDoc#doc.revs), {ok, Result} = receive_result(Doc), - ?assertEqual({ok,{0,<<"0">>}}, Result). - + ?assertEqual({ok, {0, <<"0">>}}, Result). receive_result(#doc{meta = Meta}) -> Ref = couch_util:get_value(ref, Meta), diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl index 290d095bf..a2f4cdc87 100644 --- a/src/couch/src/couch_debug.erl +++ b/src/couch/src/couch_debug.erl @@ -49,6 +49,7 @@ help() -> ]. -spec help(Function :: atom()) -> ok. +%% erlfmt-ignore help(opened_files) -> io:format(" opened_files() @@ -205,9 +206,11 @@ help(Unknown) -> [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}]. opened_files() -> - Info = [couch_file_port_info(Port) - || Port <- erlang:ports(), - {name, "efile"} =:= erlang:port_info(Port, name)], + Info = [ + couch_file_port_info(Port) + || Port <- erlang:ports(), + {name, "efile"} =:= erlang:port_info(Port, name) + ], [I || I <- Info, is_tuple(I)]. couch_file_port_info(Port) -> @@ -223,17 +226,22 @@ couch_file_port_info(Port) -> [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}]. opened_files_by_regexp(FileRegExp) -> {ok, RegExp} = re:compile(FileRegExp), - lists:filter(fun({_Port, _Pid, _Fd, Path}) -> - re:run(Path, RegExp) =/= nomatch - end, couch_debug:opened_files()). + lists:filter( + fun({_Port, _Pid, _Fd, Path}) -> + re:run(Path, RegExp) =/= nomatch + end, + couch_debug:opened_files() + ). -spec opened_files_contains(FileNameFragment :: iodata()) -> [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}]. opened_files_contains(FileNameFragment) -> - lists:filter(fun({_Port, _Pid, _Fd, Path}) -> - string:str(Path, FileNameFragment) > 0 - end, couch_debug:opened_files()). - + lists:filter( + fun({_Port, _Pid, _Fd, Path}) -> + string:str(Path, FileNameFragment) > 0 + end, + couch_debug:opened_files() + ). process_name(Pid) when is_pid(Pid) -> Info = process_info(Pid, [registered_name, dictionary, initial_call]), @@ -260,7 +268,8 @@ link_tree(RootPid, Info) -> link_tree(RootPid, Info, Fun) -> {_, Result} = link_tree( - RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun), + RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun + ), Result. 
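
`link_tree/3` below walks a process's link graph while tracking visited pids in a gb_tree so cycles terminate. A flat cousin of the same traversal, collecting every pid reachable over links exactly once (a set plays the visited-tree role here):

```
-module(links_sketch).
-export([linked/1]).

linked(Pid) ->
    walk([Pid], sets:new()).

walk([], Seen) ->
    sets:to_list(Seen);
walk([P | Rest], Seen) ->
    case sets:is_element(P, Seen) of
        true ->
            walk(Rest, Seen);
        false ->
            Links =
                case erlang:process_info(P, links) of
                    %% Links may include ports; keep only pids.
                    {links, Ls} -> [L || L <- Ls, is_pid(L)];
                    undefined -> []
                end,
            walk(Links ++ Rest, sets:add_element(P, Seen))
    end.
```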
link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) -> @@ -272,21 +281,23 @@ link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) -> Visited1 = gb_trees:insert(Pid, Props, Visited0), {links, Children} = lists:keyfind(links, 1, Props), {Visited2, NewTree} = link_tree( - RootPid, Info, Visited1, Pos + 1, Children, Fun), + RootPid, Info, Visited1, Pos + 1, Children, Fun + ), {Visited3, Result} = link_tree( - RootPid, Info, Visited2, Pos, Rest, Fun), - {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result}; + RootPid, Info, Visited2, Pos, Rest, Fun + ), + {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result}; none -> Props = info(Pid, Info), Visited1 = gb_trees:insert(Pid, Props, Visited0), {Visited2, Result} = link_tree( - RootPid, Info, Visited1, Pos, Rest, Fun), + RootPid, Info, Visited1, Pos, Rest, Fun + ), {Visited2, [{Pos, {Pid, Fun(Pid, Props), []}}] ++ Result} end; link_tree(_RootPid, _Info, Visited, _Pos, [], _Fun) -> {Visited, []}. - info(Pid, Info) when is_pid(Pid) -> ValidProps = [ backtrace, @@ -340,12 +351,16 @@ info(Port, Info) when is_port(Port) -> port_info(Port, lists:usort(Validated)). port_info(Port, Items) -> - lists:foldl(fun(Item, Acc) -> - case (catch erlang:port_info(Port, Item)) of - {Item, _Value} = Info -> [Info | Acc]; - _Else -> Acc - end - end, [], Items). + lists:foldl( + fun(Item, Acc) -> + case (catch erlang:port_info(Port, Item)) of + {Item, _Value} = Info -> [Info | Acc]; + _Else -> Acc + end + end, + [], + Items + ). mapfold_tree([], Acc, _Fun) -> {[], Acc}; @@ -380,8 +395,10 @@ print_linked_processes(Name) when is_atom(Name) -> print_linked_processes(Pid) when is_pid(Pid) -> Info = [reductions, message_queue_len, memory], TableSpec = [ - {50, left, name}, {12, centre, reductions}, - {19, centre, message_queue_len}, {10, centre, memory} + {50, left, name}, + {12, centre, reductions}, + {19, centre, message_queue_len}, + {10, centre, memory} ], Tree = linked_processes_info(Pid, Info), print_tree(Tree, TableSpec). @@ -390,9 +407,12 @@ id("couch_file:init" ++ _, Pid, _Props) -> case couch_file:process_info(Pid) of {{file_descriptor, prim_file, {Port, Fd}}, FilePath} -> term2str([ - term2str(Fd), ":", - term2str(Port), ":", - shorten_path(FilePath)]); + term2str(Fd), + ":", + term2str(Port), + ":", + shorten_path(FilePath) + ]); undefined -> "" end; @@ -402,8 +422,11 @@ id(_IdStr, _Pid, _Props) -> print_couch_index_server_processes() -> Info = [reductions, message_queue_len, memory], TableSpec = [ - {50, left, name}, {12, centre, reductions}, - {19, centre, message_queue_len}, {14, centre, memory}, {id} + {50, left, name}, + {12, centre, reductions}, + {19, centre, message_queue_len}, + {14, centre, memory}, + {id} ], Tree = link_tree(whereis(couch_index_server), Info, fun(P, Props) -> @@ -476,31 +499,40 @@ random_processes(Pids, 0) -> random_processes(Acc, Depth) -> Caller = self(), Ref = make_ref(), - Pid = case oneof([spawn_link, open_port]) of - spawn_monitor -> - {P, _} = spawn_monitor(fun() -> - Caller ! {Ref, random_processes(Depth - 1)}, - receive looper -> ok end - end), - P; - spawn -> - spawn(fun() -> - Caller ! {Ref, random_processes(Depth - 1)}, - receive looper -> ok end - end); - spawn_link -> - spawn_link(fun() -> - Caller ! {Ref, random_processes(Depth - 1)}, - receive looper -> ok end - end); - open_port -> - spawn_link(fun() -> - Port = erlang:open_port({spawn, "sleep 10"}, []), - true = erlang:link(Port), - Caller ! 
{Ref, random_processes(Depth - 1)}, - receive looper -> ok end - end) - end, + Pid = + case oneof([spawn_link, open_port]) of + spawn_monitor -> + {P, _} = spawn_monitor(fun() -> + Caller ! {Ref, random_processes(Depth - 1)}, + receive + looper -> ok + end + end), + P; + spawn -> + spawn(fun() -> + Caller ! {Ref, random_processes(Depth - 1)}, + receive + looper -> ok + end + end); + spawn_link -> + spawn_link(fun() -> + Caller ! {Ref, random_processes(Depth - 1)}, + receive + looper -> ok + end + end); + open_port -> + spawn_link(fun() -> + Port = erlang:open_port({spawn, "sleep 10"}, []), + true = erlang:link(Port), + Caller ! {Ref, random_processes(Depth - 1)}, + receive + looper -> ok + end + end) + end, receive {Ref, Pids} -> random_processes([Pid | Pids] ++ Acc, Depth - 1) end. @@ -508,7 +540,6 @@ random_processes(Acc, Depth) -> oneof(Options) -> lists:nth(couch_rand:uniform(length(Options)), Options). - tree() -> [InitialPid | _] = Processes = random_processes(5), {InitialPid, Processes, link_tree(InitialPid)}. @@ -524,7 +555,8 @@ link_tree_test_() -> "link_tree tests", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_have_same_shape/1, fun should_include_extra_info/1 @@ -534,16 +566,16 @@ link_tree_test_() -> should_have_same_shape({InitialPid, _Processes, Tree}) -> ?_test(begin - InfoTree = linked_processes_info(InitialPid, []), - ?assert(is_equal(InfoTree, Tree)), - ok + InfoTree = linked_processes_info(InitialPid, []), + ?assert(is_equal(InfoTree, Tree)), + ok end). should_include_extra_info({InitialPid, _Processes, _Tree}) -> Info = [reductions, message_queue_len, memory], ?_test(begin - InfoTree = linked_processes_info(InitialPid, Info), - map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) -> + InfoTree = linked_processes_info(InitialPid, Info), + map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) -> case Key of Pid when is_pid(Pid) -> ?assert(lists:keymember(reductions, 1, Props)), @@ -553,11 +585,12 @@ should_include_extra_info({InitialPid, _Processes, _Tree}) -> ok end, Props - end), - ok + end), + ok end). -is_equal([], []) -> true; +is_equal([], []) -> + true; is_equal([{Pos, {Pid, _, A}} | RestA], [{Pos, {Pid, _, B}} | RestB]) -> case is_equal(RestA, RestB) of false -> false; diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index ec16d21db..5d44e456c 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -12,7 +12,7 @@ -module(couch_doc). --export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]). +-export([to_doc_info/1, to_doc_info_path/1, parse_rev/1, parse_revs/1, rev_to_str/1, revs_to_strs/1]). -export([from_json_obj/1, from_json_obj_validate/1]). -export([from_json_obj/2, from_json_obj_validate/2]). -export([to_json_obj/2, has_stubs/1, merge_stubs/2]). @@ -26,15 +26,14 @@ -export([with_ejson_body/1]). -export([is_deleted/1]). - -include_lib("couch/include/couch_db.hrl"). -spec to_path(#doc{}) -> path(). -to_path(#doc{revs={Start, RevIds}}=Doc) -> +to_path(#doc{revs = {Start, RevIds}} = Doc) -> [Branch] = to_branch(Doc, lists:reverse(RevIds)), {Start - length(RevIds) + 1, Branch}. --spec to_branch(#doc{}, [RevId::binary()]) -> [branch()]. +-spec to_branch(#doc{}, [RevId :: binary()]) -> [branch()]. 
to_branch(Doc, [RevId]) -> [{RevId, Doc, []}]; to_branch(Doc, [RevId | Rest]) -> @@ -43,8 +42,8 @@ to_branch(Doc, [RevId | Rest]) -> % helpers used by to_json_obj to_json_rev(0, []) -> []; -to_json_rev(Start, [FirstRevId|_]) -> - [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}]. +to_json_rev(Start, [FirstRevId | _]) -> + [{<<"_rev">>, ?l2b([integer_to_list(Start), "-", revid_to_str(FirstRevId)])}]. to_json_body(true, {Body}) -> Body ++ [{<<"_deleted">>, true}]; @@ -52,53 +51,69 @@ to_json_body(false, {Body}) -> Body. to_json_revisions(Options, Start, RevIds0) -> - RevIds = case proplists:get_value(revs, Options) of + RevIds = + case proplists:get_value(revs, Options) of + true -> + RevIds0; + Num when is_integer(Num), Num > 0 -> + lists:sublist(RevIds0, Num); + _ -> + [] + end, + if + RevIds == [] -> + []; true -> - RevIds0; - Num when is_integer(Num), Num > 0 -> - lists:sublist(RevIds0, Num); - _ -> - [] - end, - if RevIds == [] -> []; true -> - [{<<"_revisions">>, {[{<<"start">>, Start}, - {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}] + [ + {<<"_revisions">>, + {[ + {<<"start">>, Start}, + {<<"ids">>, [revid_to_str(R) || R <- RevIds]} + ]}} + ] end. - revid_to_str(RevId) when size(RevId) =:= 16 -> ?l2b(couch_util:to_hex(RevId)); revid_to_str(RevId) -> RevId. rev_to_str({Pos, RevId}) -> - ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]). - + ?l2b([integer_to_list(Pos), "-", revid_to_str(RevId)]). revs_to_strs([]) -> []; -revs_to_strs([{Pos, RevId}| Rest]) -> +revs_to_strs([{Pos, RevId} | Rest]) -> [rev_to_str({Pos, RevId}) | revs_to_strs(Rest)]. to_json_meta(Meta) -> lists:flatmap( - fun({revs_info, Start, RevsInfo}) -> - {JsonRevsInfo, _Pos} = lists:mapfoldl( - fun({RevId, Status}, PosAcc) -> - JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})}, - {<<"status">>, ?l2b(atom_to_list(Status))}]}, - {JsonObj, PosAcc - 1} - end, Start, RevsInfo), - [{<<"_revs_info">>, JsonRevsInfo}]; - ({local_seq, Seq}) -> - [{<<"_local_seq">>, Seq}]; - ({conflicts, Conflicts}) -> - [{<<"_conflicts">>, revs_to_strs(Conflicts)}]; - ({deleted_conflicts, DConflicts}) -> - [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}]; - (_) -> - [] - end, Meta). + fun + ({revs_info, Start, RevsInfo}) -> + {JsonRevsInfo, _Pos} = lists:mapfoldl( + fun({RevId, Status}, PosAcc) -> + JsonObj = + {[ + {<<"rev">>, rev_to_str({PosAcc, RevId})}, + {<<"status">>, ?l2b(atom_to_list(Status))} + ]}, + {JsonObj, PosAcc - 1} + end, + Start, + RevsInfo + ), + [{<<"_revs_info">>, JsonRevsInfo}]; + ({local_seq, Seq}) -> + [{<<"_local_seq">>, Seq}]; + ({conflicts, Conflicts}) -> + [{<<"_conflicts">>, revs_to_strs(Conflicts)}]; + ({deleted_conflicts, DConflicts}) -> + [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}]; + (_) -> + [] + end, + Meta + ). to_json_attachments(Attachments, Options) -> to_json_attachments( @@ -117,14 +132,23 @@ to_json_attachments(Atts, OutputData, Follows, ShowEnc) -> to_json_obj(Doc, Options) -> doc_to_json_obj(with_ejson_body(Doc), Options). 
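
`rev_to_str/1` and `revid_to_str/1` below define the printed form of a revision: `Pos-RevId`, with raw 16-byte revids hex-encoded first. A standalone version using only the stdlib (assumes OTP 24+ for `binary:encode_hex/1`; `couch_util:to_hex/1` plays that role in this tree):

```
-module(rev_sketch).
-export([rev_to_str/1]).

rev_to_str({Pos, RevId}) when byte_size(RevId) =:= 16 ->
    %% Raw 128-bit digest: hex-encode before printing.
    Hex = string:lowercase(binary:encode_hex(RevId)),
    <<(integer_to_binary(Pos))/binary, "-", Hex/binary>>;
rev_to_str({Pos, RevId}) when is_binary(RevId) ->
    <<(integer_to_binary(Pos))/binary, "-", RevId/binary>>.
```

For example, `rev_to_str({1, <<"abc">>})` yields `<<"1-abc">>`.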
-doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds}, - meta=Meta}=Doc,Options)-> - {[{<<"_id">>, Id}] - ++ to_json_rev(Start, RevIds) - ++ to_json_body(Del, Body) - ++ to_json_revisions(Options, Start, RevIds) - ++ to_json_meta(Meta) - ++ to_json_attachments(Doc#doc.atts, Options) +doc_to_json_obj( + #doc{ + id = Id, + deleted = Del, + body = Body, + revs = {Start, RevIds}, + meta = Meta + } = Doc, + Options +) -> + { + [{<<"_id">>, Id}] ++ + to_json_rev(Start, RevIds) ++ + to_json_body(Del, Body) ++ + to_json_revisions(Options, Start, RevIds) ++ + to_json_meta(Meta) ++ + to_json_attachments(Doc#doc.atts, Options) }. from_json_obj_validate(EJson) -> @@ -135,48 +159,54 @@ from_json_obj_validate(EJson, DbName) -> Doc = from_json_obj(EJson, DbName), case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of true -> - validate_attachment_sizes(Doc#doc.atts), - Doc; + validate_attachment_sizes(Doc#doc.atts), + Doc; false -> throw({request_entity_too_large, Doc#doc.id}) end. - validate_attachment_sizes([]) -> ok; validate_attachment_sizes(Atts) -> MaxAttSize = couch_att:max_attachment_size(), - lists:foreach(fun(Att) -> - AttName = couch_att:fetch(name, Att), - AttSize = couch_att:fetch(att_len, Att), - couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize) - end, Atts). - + lists:foreach( + fun(Att) -> + AttName = couch_att:fetch(name, Att), + AttSize = couch_att:fetch(att_len, Att), + couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize) + end, + Atts + ). from_json_obj({Props}) -> from_json_obj({Props}, undefined). from_json_obj({Props}, DbName) -> - transfer_fields(Props, #doc{body=[]}, DbName); + transfer_fields(Props, #doc{body = []}, DbName); from_json_obj(_Other, _) -> throw({bad_request, "Document must be a JSON object"}). parse_revid(RevId) when size(RevId) =:= 32 -> RevInt = erlang:list_to_integer(?b2l(RevId), 16), - <<RevInt:128>>; + <<RevInt:128>>; parse_revid(RevId) when length(RevId) =:= 32 -> RevInt = erlang:list_to_integer(RevId, 16), - <<RevInt:128>>; + <<RevInt:128>>; parse_revid(RevId) when is_binary(RevId) -> RevId; parse_revid(RevId) when is_list(RevId) -> ?l2b(RevId). - parse_rev(Rev) when is_binary(Rev) -> parse_rev(?b2l(Rev)); parse_rev(Rev) when is_list(Rev) -> - SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev), + SplitRev = lists:splitwith( + fun + ($-) -> false; + (_) -> true + end, + Rev + ), case SplitRev of {Pos, [$- | RevId]} -> try @@ -185,7 +215,8 @@ parse_rev(Rev) when is_list(Rev) -> catch error:badarg -> throw({bad_request, <<"Invalid rev format">>}) end; - _Else -> throw({bad_request, <<"Invalid rev format">>}) + _Else -> + throw({bad_request, <<"Invalid rev format">>}) end; parse_rev(_BadRev) -> throw({bad_request, <<"Invalid rev format">>}). @@ -197,10 +228,11 @@ parse_revs([Rev | Rest]) -> parse_revs(_) -> throw({bad_request, "Invalid list of revisions"}). 
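For the revision parsing just reformatted, a small round-trip sketch (the rev string is a made-up example; parse_rev/1 and rev_to_str/1 are both in the module's export list):

```
%% Hypothetical round-trip through the rev helpers above.
-module(rev_demo).
-export([demo/0]).

demo() ->
    Rev = <<"1-967a00dff5e02add41819138abb3284d">>,
    {1, RevId} = couch_doc:parse_rev(Rev),
    %% parse_revid/1 packs the 32 hex chars into a 16-byte binary
    16 = byte_size(RevId),
    %% rev_to_str/1 hex-encodes 16-byte ids, so the string survives intact
    Rev = couch_doc:rev_to_str({1, RevId}),
    ok.
```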
- validate_docid(DocId, DbName) -> - case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso - couch_db:is_system_db_name(DocId) of + case + DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso + couch_db:is_system_db_name(DocId) + of true -> ok; false -> @@ -214,10 +246,11 @@ validate_docid(<<"_design/">>) -> validate_docid(<<"_local/">>) -> throw({illegal_docid, <<"Illegal document id `_local/`">>}); validate_docid(Id) when is_binary(Id) -> - MaxLen = case config:get("couchdb", "max_document_id_length", "infinity") of - "infinity" -> infinity; - IntegerVal -> list_to_integer(IntegerVal) - end, + MaxLen = + case config:get("couchdb", "max_document_id_length", "infinity") of + "infinity" -> infinity; + IntegerVal -> list_to_integer(IntegerVal) + end, case MaxLen > 0 andalso byte_size(Id) > MaxLen of true -> throw({illegal_docid, <<"Document id is too long">>}); false -> ok @@ -227,69 +260,72 @@ validate_docid(Id) when is_binary(Id) -> true -> ok end, case Id of - <<"_design/", _/binary>> -> ok; - <<"_local/", _/binary>> -> ok; - <<"_", _/binary>> -> - case couch_db_plugin:validate_docid(Id) of - true -> - ok; - false -> - throw( - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}) - end; - _Else -> ok + <<"_design/", _/binary>> -> + ok; + <<"_local/", _/binary>> -> + ok; + <<"_", _/binary>> -> + case couch_db_plugin:validate_docid(Id) of + true -> + ok; + false -> + throw( + {illegal_docid, <<"Only reserved document ids may start with underscore.">>} + ) + end; + _Else -> + ok end; validate_docid(Id) -> couch_log:debug("Document id is not a string: ~p", [Id]), throw({illegal_docid, <<"Document id must be a string">>}). -transfer_fields([], #doc{body=Fields}=Doc, _) -> +transfer_fields([], #doc{body = Fields} = Doc, _) -> % convert fields back to json object - Doc#doc{body={lists:reverse(Fields)}}; - + Doc#doc{body = {lists:reverse(Fields)}}; transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) -> validate_docid(Id, DbName), - transfer_fields(Rest, Doc#doc{id=Id}, DbName); - -transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) -> + transfer_fields(Rest, Doc#doc{id = Id}, DbName); +transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs = {0, []}} = Doc, DbName) -> {Pos, RevId} = parse_rev(Rev), - transfer_fields(Rest, - Doc#doc{revs={Pos, [RevId]}}, DbName); - + transfer_fields( + Rest, + Doc#doc{revs = {Pos, [RevId]}}, + DbName + ); transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc, DbName) -> % we already got the rev from the _revisions transfer_fields(Rest, Doc, DbName); - transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc, DbName) -> Atts = [couch_att:from_json(Name, Props) || {Name, {Props}} <- JsonBins], - transfer_fields(Rest, Doc#doc{atts=Atts}, DbName); - + transfer_fields(Rest, Doc#doc{atts = Atts}, DbName); transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) -> RevIds = couch_util:get_value(<<"ids">>, Props), Start = couch_util:get_value(<<"start">>, Props), - if not is_integer(Start) -> - throw({doc_validation, "_revisions.start isn't an integer."}); - not is_list(RevIds) -> - throw({doc_validation, "_revisions.ids isn't a array."}); - true -> - ok + if + not is_integer(Start) -> + throw({doc_validation, "_revisions.start isn't an integer."}); + not is_list(RevIds) -> + throw({doc_validation, "_revisions.ids isn't a array."}); + true -> + ok end, - RevIds2 = lists:map(fun(RevId) -> - try - parse_revid(RevId) - catch - error:function_clause -> - throw({doc_validation, "RevId 
isn't a string"}); - error:badarg -> - throw({doc_validation, "RevId isn't a valid hexadecimal"}) - end - end, RevIds), - transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}}, DbName); - + RevIds2 = lists:map( + fun(RevId) -> + try + parse_revid(RevId) + catch + error:function_clause -> + throw({doc_validation, "RevId isn't a string"}); + error:badarg -> + throw({doc_validation, "RevId isn't a valid hexadecimal"}) + end + end, + RevIds + ), + transfer_fields(Rest, Doc#doc{revs = {Start, RevIds2}}, DbName); transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) -> - transfer_fields(Rest, Doc#doc{deleted=B}, DbName); - + transfer_fields(Rest, Doc#doc{deleted = B}, DbName); % ignored fields transfer_fields([{<<"_revs_info">>, _} | Rest], Doc, DbName) -> transfer_fields(Rest, Doc, DbName); @@ -299,36 +335,49 @@ transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) -> transfer_fields(Rest, Doc, DbName); transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) -> transfer_fields(Rest, Doc, DbName); - % special field for per doc access control, for future compatibility -transfer_fields([{<<"_access">>, _} = Field | Rest], - #doc{body=Fields} = Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName); - +transfer_fields( + [{<<"_access">>, _} = Field | Rest], + #doc{body = Fields} = Doc, + DbName +) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); % special fields for replication documents -transfer_fields([{<<"_replication_state">>, _} = Field | Rest], - #doc{body=Fields} = Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName); -transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest], - #doc{body=Fields} = Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName); -transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest], - #doc{body=Fields} = Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName); -transfer_fields([{<<"_replication_id">>, _} = Field | Rest], - #doc{body=Fields} = Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName); -transfer_fields([{<<"_replication_stats">>, _} = Field | Rest], - #doc{body=Fields} = Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName); - +transfer_fields( + [{<<"_replication_state">>, _} = Field | Rest], + #doc{body = Fields} = Doc, + DbName +) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); +transfer_fields( + [{<<"_replication_state_time">>, _} = Field | Rest], + #doc{body = Fields} = Doc, + DbName +) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); +transfer_fields( + [{<<"_replication_state_reason">>, _} = Field | Rest], + #doc{body = Fields} = Doc, + DbName +) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); +transfer_fields( + [{<<"_replication_id">>, _} = Field | Rest], + #doc{body = Fields} = Doc, + DbName +) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); +transfer_fields( + [{<<"_replication_stats">>, _} = Field | Rest], + #doc{body = Fields} = Doc, + DbName +) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); % unknown special field -transfer_fields([{<<"_",Name/binary>>, _} | _], _, _) -> - throw({doc_validation, - ?l2b(io_lib:format("Bad special document member: _~s", [Name]))}); - -transfer_fields([Field | Rest], #doc{body=Fields}=Doc, DbName) -> - transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName). 
+transfer_fields([{<<"_", Name/binary>>, _} | _], _, _) -> + throw({doc_validation, ?l2b(io_lib:format("Bad special document member: _~s", [Name]))}); +transfer_fields([Field | Rest], #doc{body = Fields} = Doc, DbName) -> + transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName). to_doc_info(FullDocInfo) -> {DocInfo, _Path} = to_doc_info_path(FullDocInfo), @@ -340,10 +389,11 @@ max_seq(Tree, UpdateSeq) -> {_Deleted, _DiskPos, OldTreeSeq} -> % Older versions didn't track data sizes. erlang:max(MaxOldSeq, OldTreeSeq); - {_Deleted, _DiskPos, OldTreeSeq, _Size} -> % necessary clause? + % necessary clause? + {_Deleted, _DiskPos, OldTreeSeq, _Size} -> % Older versions didn't store #leaf records. erlang:max(MaxOldSeq, OldTreeSeq); - #leaf{seq=OldTreeSeq} -> + #leaf{seq = OldTreeSeq} -> erlang:max(MaxOldSeq, OldTreeSeq); _ -> MaxOldSeq @@ -351,20 +401,25 @@ max_seq(Tree, UpdateSeq) -> end, couch_key_tree:fold(FoldFun, UpdateSeq, Tree). -to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree,update_seq=FDISeq}) -> +to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq}) -> RevInfosAndPath = [ - {rev_info(Node), Path} || {_Leaf, Path} = Node <- + {rev_info(Node), Path} + || {_Leaf, Path} = Node <- couch_key_tree:get_all_leafs(Tree) ], SortedRevInfosAndPath = lists:sort( - fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA}, - {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) -> + fun( + {#rev_info{deleted = DeletedA, rev = RevA}, _PathA}, + {#rev_info{deleted = DeletedB, rev = RevB}, _PathB} + ) -> % sort descending by {not deleted, rev} {not DeletedA, RevA} > {not DeletedB, RevB} - end, RevInfosAndPath), - [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath, + end, + RevInfosAndPath + ), + [{_RevInfo, WinPath} | _] = SortedRevInfosAndPath, RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath], - {#doc_info{id=Id, high_seq=max_seq(Tree, FDISeq), revs=RevInfos}, WinPath}. + {#doc_info{id = Id, high_seq = max_seq(Tree, FDISeq), revs = RevInfos}, WinPath}. rev_info({#leaf{} = Leaf, {Pos, [RevId | _]}}) -> #rev_info{ @@ -381,53 +436,56 @@ rev_info({#doc{} = Doc, {Pos, [RevId | _]}}) -> rev = {Pos, RevId} }. -is_deleted(#full_doc_info{rev_tree=Tree}) -> +is_deleted(#full_doc_info{rev_tree = Tree}) -> is_deleted(Tree); is_deleted(Tree) -> Leafs = couch_key_tree:get_all_leafs(Tree), try - lists:foldl(fun - ({#leaf{deleted=false},_}, _) -> - throw(not_deleted); - ({#doc{deleted=false},_}, _) -> - throw(not_deleted); - (_, Acc) -> - Acc - end, nil, Leafs), + lists:foldl( + fun + ({#leaf{deleted = false}, _}, _) -> + throw(not_deleted); + ({#doc{deleted = false}, _}, _) -> + throw(not_deleted); + (_, Acc) -> + Acc + end, + nil, + Leafs + ), true - catch throw:not_deleted -> - false + catch + throw:not_deleted -> + false end. - get_validate_doc_fun({Props}) -> get_validate_doc_fun(couch_doc:from_json_obj({Props})); -get_validate_doc_fun(#doc{body={Props}}=DDoc) -> +get_validate_doc_fun(#doc{body = {Props}} = DDoc) -> case couch_util:get_value(<<"validate_doc_update">>, Props) of - undefined -> - nil; - _Else -> - fun(EditDoc, DiskDoc, Ctx, SecObj) -> - couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) - end + undefined -> + nil; + _Else -> + fun(EditDoc, DiskDoc, Ctx, SecObj) -> + couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) + end end. - -has_stubs(#doc{atts=Atts}) -> +has_stubs(#doc{atts = Atts}) -> lists:any(fun couch_att:is_stub/1, Atts); has_stubs(Atts) -> lists:any(fun couch_att:is_stub/1, Atts). 
merge_stubs(#doc{id = Id}, nil) -> throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>}); -merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) -> +merge_stubs(#doc{id = Id, atts = MemBins} = StubsDoc, #doc{atts = DiskBins}) -> case couch_att:merge_stubs(MemBins, DiskBins) of {ok, MergedBins} -> StubsDoc#doc{atts = MergedBins}; {missing, Name} -> - throw({missing_stub, - <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>> - }) + throw( + {missing_stub, <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>>} + ) end. len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) -> @@ -435,28 +493,38 @@ len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) -> AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts), couch_httpd_multipart:length_multipart_stream(Boundary, JsonBytes, AttsDecoded). - -doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun, - SendEncodedAtts) -> - AttsToInclude = lists:filter(fun(Att)-> couch_att:fetch(data, Att) /= stub end, Atts), +doc_to_multi_part_stream( + Boundary, + JsonBytes, + Atts, + WriteFun, + SendEncodedAtts +) -> + AttsToInclude = lists:filter(fun(Att) -> couch_att:fetch(data, Att) /= stub end, Atts), AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts), - AttFun = case SendEncodedAtts of - false -> fun couch_att:foldl_decode/3; - true -> fun couch_att:foldl/3 - end, + AttFun = + case SendEncodedAtts of + false -> fun couch_att:foldl_decode/3; + true -> fun couch_att:foldl/3 + end, couch_httpd_multipart:encode_multipart_stream( - Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun). + Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun + ). decode_attributes(Atts, SendEncodedAtts) -> - lists:map(fun(Att) -> - [Name, AttLen, DiskLen, Type, Encoding] = - couch_att:fetch([name, att_len, disk_len, type, encoding], Att), - Len = case SendEncodedAtts of - true -> AttLen; - false -> DiskLen - end, - {Att, Name, Len, Type, Encoding} - end, Atts). + lists:map( + fun(Att) -> + [Name, AttLen, DiskLen, Type, Encoding] = + couch_att:fetch([name, att_len, disk_len, type, encoding], Att), + Len = + case SendEncodedAtts of + true -> AttLen; + false -> DiskLen + end, + {Att, Name, Len, Type, Encoding} + end, + Atts + ). doc_from_multi_part_stream(ContentType, DataFun) -> doc_from_multi_part_stream(ContentType, DataFun, make_ref()). 
@@ -466,25 +534,32 @@ doc_from_multi_part_stream(ContentType, DataFun, Ref) -> doc_from_multi_part_stream(ContentType, DataFun, Ref, ValidateDocLimits) -> case couch_httpd_multipart:decode_multipart_stream(ContentType, DataFun, Ref) of - {{started_open_doc_revs, NewRef}, Parser, _ParserRef} -> - restart_open_doc_revs(Parser, Ref, NewRef); - {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} -> - Doc = case ValidateDocLimits of - true -> - from_json_obj_validate(?JSON_DECODE(DocBytes)); - false -> - from_json_obj(?JSON_DECODE(DocBytes)) - end, - erlang:put(mochiweb_request_recv, true), - % we'll send the Parser process ID to the remote nodes so they can - % retrieve their own copies of the attachment data - WithParser = fun(follows) -> {follows, Parser, Ref}; (D) -> D end, - Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts], - WaitFun = fun() -> - receive {'DOWN', ParserRef, _, _, _} -> ok end - end, - {ok, Doc#doc{atts=Atts}, WaitFun, Parser}; - ok -> ok + {{started_open_doc_revs, NewRef}, Parser, _ParserRef} -> + restart_open_doc_revs(Parser, Ref, NewRef); + {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} -> + Doc = + case ValidateDocLimits of + true -> + from_json_obj_validate(?JSON_DECODE(DocBytes)); + false -> + from_json_obj(?JSON_DECODE(DocBytes)) + end, + erlang:put(mochiweb_request_recv, true), + % we'll send the Parser process ID to the remote nodes so they can + % retrieve their own copies of the attachment data + WithParser = fun + (follows) -> {follows, Parser, Ref}; + (D) -> D + end, + Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts], + WaitFun = fun() -> + receive + {'DOWN', ParserRef, _, _, _} -> ok + end + end, + {ok, Doc#doc{atts = Atts}, WaitFun, Parser}; + ok -> + ok end. restart_open_doc_revs(Parser, Ref, NewRef) -> @@ -493,7 +568,6 @@ restart_open_doc_revs(Parser, Ref, NewRef) -> flush_parser_messages(Ref), erlang:error({restart_open_doc_revs, NewRef}). - flush_parser_messages(Ref) -> receive {headers, Ref, _} -> @@ -508,7 +582,6 @@ flush_parser_messages(Ref) -> ok end. - with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) -> Doc#doc{body = couch_compress:decompress(Body)}; with_ejson_body(#doc{body = {_}} = Doc) -> diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl index b02b9ba7c..628bc2fab 100644 --- a/src/couch/src/couch_ejson_compare.erl +++ b/src/couch/src/couch_ejson_compare.erl @@ -27,10 +27,8 @@ compare_strings_nif/2 ]). - -on_load(init/0). - init() -> NumScheds = erlang:system_info(schedulers), Dir = code:priv_dir(couch), @@ -39,65 +37,54 @@ init() -> % partitioned row comparison less({p, PA, A}, {p, PB, B}) -> less([PA, A], [PB, B]); - less(A, B) -> try less_nif(A, B) catch - error:max_depth_error -> - % The EJSON structure is too deep, fallback to Erlang land. - less_erl(A, B) + error:max_depth_error -> + % The EJSON structure is too deep, fallback to Erlang land. + less_erl(A, B) end. less_json_ids({JsonA, IdA}, {JsonB, IdB}) -> case less(JsonA, JsonB) of - 0 -> - IdA < IdB; - Result -> - Result < 0 + 0 -> + IdA < IdB; + Result -> + Result < 0 end. -less_json(A,B) -> +less_json(A, B) -> less(A, B) < 0. - get_icu_version() -> erlang:nif_error(get_icu_version). - get_uca_version() -> erlang:nif_error(get_uca_version). - less_nif(A, B) -> erlang:nif_error(less_nif_load_error, [A, B]). - compare_strings_nif(A, B) -> erlang:nif_error(compare_string_nif, [A, B]). 
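For orientation, the collation this module implements (spelled out by the less_erl/2 clauses just below) is null < false < true < numbers < strings < arrays < objects. A sketch of spot-checks, assuming couch's collation NIF is loaded so that string comparisons work:

```
%% Hypothetical spot-checks of the type ordering.
-module(collation_demo).
-export([demo/0]).

demo() ->
    true = couch_ejson_compare:less_json(null, false),
    true = couch_ejson_compare:less_json(false, true),
    true = couch_ejson_compare:less_json(true, 0),
    true = couch_ejson_compare:less_json(0, <<"a">>),
    true = couch_ejson_compare:less_json(<<"a">>, [0]),
    true = couch_ejson_compare:less_json([0], {[{<<"k">>, 0}]}),
    ok.
```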
- -less_erl(A,A) -> 0; - -less_erl(A,B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B); -less_erl(A,_) when is_atom(A) -> -1; -less_erl(_,B) when is_atom(B) -> 1; - -less_erl(A,B) when is_number(A), is_number(B) -> A - B; -less_erl(A,_) when is_number(A) -> -1; -less_erl(_,B) when is_number(B) -> 1; - -less_erl(A,B) when is_binary(A), is_binary(B) -> compare_strings_nif(A,B); -less_erl(A,_) when is_binary(A) -> -1; -less_erl(_,B) when is_binary(B) -> 1; - -less_erl(A,B) when is_list(A), is_list(B) -> less_list(A,B); -less_erl(A,_) when is_list(A) -> -1; -less_erl(_,B) when is_list(B) -> 1; - -less_erl({A},{B}) when is_list(A), is_list(B) -> less_props(A,B); -less_erl({A},_) when is_list(A) -> -1; -less_erl(_,{B}) when is_list(B) -> 1. +less_erl(A, A) -> 0; +less_erl(A, B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B); +less_erl(A, _) when is_atom(A) -> -1; +less_erl(_, B) when is_atom(B) -> 1; +less_erl(A, B) when is_number(A), is_number(B) -> A - B; +less_erl(A, _) when is_number(A) -> -1; +less_erl(_, B) when is_number(B) -> 1; +less_erl(A, B) when is_binary(A), is_binary(B) -> compare_strings_nif(A, B); +less_erl(A, _) when is_binary(A) -> -1; +less_erl(_, B) when is_binary(B) -> 1; +less_erl(A, B) when is_list(A), is_list(B) -> less_list(A, B); +less_erl(A, _) when is_list(A) -> -1; +less_erl(_, B) when is_list(B) -> 1; +less_erl({A}, {B}) when is_list(A), is_list(B) -> less_props(A, B); +less_erl({A}, _) when is_list(A) -> -1; +less_erl(_, {B}) when is_list(B) -> 1. atom_sort(null) -> 1; atom_sort(false) -> 2; @@ -105,33 +92,33 @@ atom_sort(true) -> 3. less_props([], []) -> 0; -less_props([], [_|_]) -> +less_props([], [_ | _]) -> -1; less_props(_, []) -> 1; -less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) -> +less_props([{AKey, AValue} | RestA], [{BKey, BValue} | RestB]) -> case compare_strings_nif(AKey, BKey) of - 0 -> - case less_erl(AValue, BValue) of 0 -> - less_props(RestA, RestB); + case less_erl(AValue, BValue) of + 0 -> + less_props(RestA, RestB); + Result -> + Result + end; Result -> Result - end; - Result -> - Result end. less_list([], []) -> 0; -less_list([], [_|_]) -> +less_list([], [_ | _]) -> -1; less_list(_, []) -> 1; -less_list([A|RestA], [B|RestB]) -> - case less_erl(A,B) of - 0 -> - less_list(RestA, RestB); - Result -> - Result +less_list([A | RestA], [B | RestB]) -> + case less_erl(A, B) of + 0 -> + less_list(RestA, RestB); + Result -> + Result end. diff --git a/src/couch/src/couch_ejson_size.erl b/src/couch/src/couch_ejson_size.erl index f5505680f..54a7094ff 100644 --- a/src/couch/src/couch_ejson_size.erl +++ b/src/couch/src/couch_ejson_size.erl @@ -14,85 +14,78 @@ -export([encoded_size/1]). - %% Compound objects encoded_size({[]}) -> - 2; % opening { and closing } - + % opening { and closing } + 2; encoded_size({KVs}) -> % Would add 2 because opening { and closing }, but then inside the LC % would accumulate an extra , at the end so subtract 2 - 1 - 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K,V} <- KVs]); - + 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K, V} <- KVs]); encoded_size([]) -> - 2; % opening [ and closing ] - + % opening [ and closing ] + 2; encoded_size(List) when is_list(List) -> % 2 is for [ and ] but inside LC would accumulate an extra , so subtract % 2 - 1 1 + lists:sum([encoded_size(V) + 1 || V <- List]); - %% Floats. 
encoded_size(0.0) -> 3; - encoded_size(1.0) -> 3; - encoded_size(Float) when is_float(Float), Float < 0.0 -> encoded_size(-Float) + 1; - encoded_size(Float) when is_float(Float), Float < 1.0 -> if - Float =< 1.0e-300 -> 3; % close enough to 0.0 - Float =< 1.0e-100 -> 6; % Xe-YYY - Float =< 1.0e-10 -> 5; % Xe-YY - Float =< 0.01 -> 4; % Xe-Y, 0.0X - true -> 3 % 0.X + % close enough to 0.0 + Float =< 1.0e-300 -> 3; + % Xe-YYY + Float =< 1.0e-100 -> 6; + % Xe-YY + Float =< 1.0e-10 -> 5; + % Xe-Y, 0.0X + Float =< 0.01 -> 4; + % 0.X + true -> 3 end; - encoded_size(Float) when is_float(Float) -> if - Float >= 1.0e100 -> 5; % XeYYY - Float >= 1.0e10 -> 4; % XeYY - true -> 3 % XeY, X.Y + % XeYYY + Float >= 1.0e100 -> 5; + % XeYY + Float >= 1.0e10 -> 4; + % XeY, X.Y + true -> 3 end; - %% Integers encoded_size(0) -> 1; - encoded_size(Integer) when is_integer(Integer), Integer < 0 -> encoded_size(-Integer) + 1; - encoded_size(Integer) when is_integer(Integer) -> if - Integer < 10 -> 1; - Integer < 100 -> 2; - Integer < 1000 -> 3; + Integer < 10 -> 1; + Integer < 100 -> 2; + Integer < 1000 -> 3; Integer < 10000 -> 4; - true -> trunc(math:log10(Integer)) + 1 + true -> trunc(math:log10(Integer)) + 1 end; - %% Strings encoded_size(Binary) when is_binary(Binary) -> 2 + byte_size(Binary); - %% Special terminal symbols as atoms encoded_size(null) -> 4; - encoded_size(true) -> 4; - encoded_size(false) -> 5; - %% Other atoms encoded_size(Atom) when is_atom(Atom) -> diff --git a/src/couch/src/couch_emsort.erl b/src/couch/src/couch_emsort.erl index 430d94e01..9dcc08d67 100644 --- a/src/couch/src/couch_emsort.erl +++ b/src/couch/src/couch_emsort.erl @@ -142,36 +142,30 @@ num_bb = 0 }). - -define(REPORT_INTERVAL, 1000). - open(Fd) -> - {ok, #ems{fd=Fd}}. - + {ok, #ems{fd = Fd}}. open(Fd, Options) -> - {ok, set_options(#ems{fd=Fd}, Options)}. - + {ok, set_options(#ems{fd = Fd}, Options)}. set_options(Ems, []) -> Ems; set_options(Ems, [{root, Root} | Rest]) -> - set_options(Ems#ems{root=Root}, Rest); + set_options(Ems#ems{root = Root}, Rest); set_options(Ems, [{chain_chunk, Count} | Rest]) when is_integer(Count) -> - set_options(Ems#ems{chain_chunk=Count}, Rest); + set_options(Ems#ems{chain_chunk = Count}, Rest); set_options(Ems, [{back_bone_chunk, Count} | Rest]) when is_integer(Count) -> - set_options(Ems#ems{bb_chunk=Count}, Rest); + set_options(Ems#ems{bb_chunk = Count}, Rest); set_options(Ems, [{num_kvs, NumKVs} | Rest]) when is_integer(NumKVs) -> - set_options(Ems#ems{num_kvs=NumKVs}, Rest); + set_options(Ems#ems{num_kvs = NumKVs}, Rest); set_options(Ems, [{num_bb, NumBB} | Rest]) when is_integer(NumBB) -> - set_options(Ems#ems{num_bb=NumBB}, Rest). + set_options(Ems#ems{num_bb = NumBB}, Rest). - -get_fd(#ems{fd=Fd}) -> +get_fd(#ems{fd = Fd}) -> Fd. - get_state(#ems{} = Ems) -> #ems{ root = Root, @@ -184,7 +178,6 @@ get_state(#ems{} = Ems) -> {num_bb, NumBB} ]. - add(Ems, []) -> {ok, Ems}; add(Ems, KVs) -> @@ -195,69 +188,64 @@ add(Ems, KVs) -> num_bb = Ems#ems.num_bb + 1 }}. - -sort(#ems{}=Ems) -> +sort(#ems{} = Ems) -> {ok, Ems1} = merge(Ems), iter(Ems1). - merge(Ems) -> merge(Ems, fun(_) -> ok end). - -merge(#ems{root=undefined}=Ems, _Reporter) -> +merge(#ems{root = undefined} = Ems, _Reporter) -> {ok, Ems}; -merge(#ems{}=Ems, Reporter) -> +merge(#ems{} = Ems, Reporter) -> {ok, decimate(Ems, Reporter)}. 
- -iter(#ems{root=undefined}=Ems) -> +iter(#ems{root = undefined} = Ems) -> {ok, {Ems, []}}; -iter(#ems{root={BB, nil}}=Ems) -> +iter(#ems{root = {BB, nil}} = Ems) -> Chains = init_chains(Ems, small, BB), {ok, {Ems, Chains}}; -iter(#ems{root={_, _}}) -> +iter(#ems{root = {_, _}}) -> {error, not_merged}. - next({_Ems, []}) -> finished; next({Ems, Chains}) -> {KV, RestChains} = choose_kv(small, Ems, Chains), {ok, KV, {Ems, RestChains}}. - -num_kvs(#ems{num_kvs=NumKVs}) -> +num_kvs(#ems{num_kvs = NumKVs}) -> NumKVs. -num_merges(#ems{bb_chunk=BBChunk, num_bb=NumBB}) -> +num_merges(#ems{bb_chunk = BBChunk, num_bb = NumBB}) -> num_merges(BBChunk, NumBB). - -add_bb_pos(#ems{root=undefined}=Ems, Pos) -> - Ems#ems{root={[Pos], nil}}; -add_bb_pos(#ems{root={BB, Prev}}=Ems, Pos) -> +add_bb_pos(#ems{root = undefined} = Ems, Pos) -> + Ems#ems{root = {[Pos], nil}}; +add_bb_pos(#ems{root = {BB, Prev}} = Ems, Pos) -> {NewBB, NewPrev} = append_item(Ems, {BB, Prev}, Pos, Ems#ems.bb_chunk), - Ems#ems{root={NewBB, NewPrev}}. - + Ems#ems{root = {NewBB, NewPrev}}. write_kvs(Ems, KVs) -> % Write the list of KVs to disk in sorted order in chunks % of 100. Also make sure that the order is such that they % can be streamed in ascending order. {LastKVs, LastPos} = - lists:foldr(fun(KV, Acc) -> - append_item(Ems, Acc, KV, Ems#ems.chain_chunk) - end, {[], nil}, lists:sort(KVs)), + lists:foldr( + fun(KV, Acc) -> + append_item(Ems, Acc, KV, Ems#ems.chain_chunk) + end, + {[], nil}, + lists:sort(KVs) + ), {ok, Final, _} = couch_file:append_term(Ems#ems.fd, {LastKVs, LastPos}), Final. - -decimate(#ems{root={_BB, nil}}=Ems, _Reporter) -> +decimate(#ems{root = {_BB, nil}} = Ems, _Reporter) -> % We have fewer than bb_chunk backbone pointers so we're % good to start streaming KVs back to the client. Ems; -decimate(#ems{root={BB, NextBB}}=Ems, Reporter) -> +decimate(#ems{root = {BB, NextBB}} = Ems, Reporter) -> % To make sure we have a bounded amount of data in RAM % at any given point we first need to decimate the data % by performing the first couple iterations of a merge @@ -273,15 +261,13 @@ decimate(#ems{root={BB, NextBB}}=Ems, Reporter) -> % Continue decimating until we have an acceptable bound on % the number of keys to use. - decimate(Ems#ems{root={FwdBB, FwdNextBB}}, Reporter). - + decimate(Ems#ems{root = {FwdBB, FwdNextBB}}, Reporter). merge_back_bone(Ems, Choose, BB, NextBB, Reporter) -> BBPos = merge_chains(Ems, Choose, BB, Reporter), Reporter(length(BB)), merge_rest_back_bone(Ems, Choose, NextBB, {[BBPos], nil}, Reporter). - merge_rest_back_bone(_Ems, _Choose, nil, Acc, _Reporter) -> Acc; merge_rest_back_bone(Ems, Choose, BBPos, Acc, Reporter) -> @@ -290,40 +276,39 @@ merge_rest_back_bone(Ems, Choose, BBPos, Acc, Reporter) -> {NewBB, NewPrev} = append_item(Ems, Acc, NewPos, Ems#ems.bb_chunk), merge_rest_back_bone(Ems, Choose, NextBB, {NewBB, NewPrev}, Reporter). - merge_chains(Ems, Choose, BB, Reporter) -> Chains = init_chains(Ems, Choose, BB), merge_chains(Ems, Choose, Chains, {[], nil}, Reporter, 0). 
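Taken together, the functions above give the external merge sort its public shape. A hypothetical end-to-end use, with an invented file path and key/value pairs, relying on the module's default chunk sizes:

```
%% Sketch: add unsorted batches, merge, then stream the KVs back in order.
-module(emsort_demo).
-export([demo/0]).

demo() ->
    {ok, Fd} = couch_file:open("/tmp/emsort_demo.dat", [create, overwrite]),
    {ok, Ems0} = couch_emsort:open(Fd),
    {ok, Ems1} = couch_emsort:add(Ems0, [{2, b}, {1, a}]),
    {ok, Ems2} = couch_emsort:add(Ems1, [{3, c}]),
    {ok, Iter0} = couch_emsort:sort(Ems2),
    {ok, {1, a}, Iter1} = couch_emsort:next(Iter0),
    {ok, {2, b}, Iter2} = couch_emsort:next(Iter1),
    {ok, {3, c}, Iter3} = couch_emsort:next(Iter2),
    finished = couch_emsort:next(Iter3),
    ok = couch_file:close(Fd).
```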
- merge_chains(Ems, _Choose, [], ChainAcc, _Reporter, _Count) -> {ok, CPos, _} = couch_file:append_term(Ems#ems.fd, ChainAcc), CPos; -merge_chains(#ems{chain_chunk=CC}=Ems, Choose, Chains, Acc, Reporter, Count0) -> +merge_chains(#ems{chain_chunk = CC} = Ems, Choose, Chains, Acc, Reporter, Count0) -> {KV, RestChains} = choose_kv(Choose, Ems, Chains), {NewKVs, NewPrev} = append_item(Ems, Acc, KV, CC), - Count1 = case (Count0 + 1) rem ?REPORT_INTERVAL of - 0 -> - Reporter(Count0), - 0; - _ -> - Count0 + 1 - end, + Count1 = + case (Count0 + 1) rem ?REPORT_INTERVAL of + 0 -> + Reporter(Count0), + 0; + _ -> + Count0 + 1 + end, merge_chains(Ems, Choose, RestChains, {NewKVs, NewPrev}, Reporter, Count1). - init_chains(Ems, Choose, BB) -> - Chains = lists:map(fun(CPos) -> - {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos), - {KVs, NextKVs} - end, BB), + Chains = lists:map( + fun(CPos) -> + {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos), + {KVs, NextKVs} + end, + BB + ), order_chains(Choose, Chains). - order_chains(small, Chains) -> lists:sort(Chains); order_chains(big, Chains) -> lists:reverse(lists:sort(Chains)). - choose_kv(_Choose, _Ems, [{[KV], nil} | Rest]) -> {KV, Rest}; choose_kv(Choose, Ems, [{[KV], Pos} | RestChains]) -> @@ -338,26 +323,22 @@ choose_kv(Choose, _Ems, [{[KV | RestKVs], Prev} | RestChains]) -> big -> {KV, ins_big_chain(RestChains, {RestKVs, Prev}, [])} end. - -ins_small_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1<K2 -> +ins_small_chain([{[{K1, _} | _], _} = C1 | Rest], {[{K2, _} | _], _} = C2, Acc) when K1 < K2 -> ins_small_chain(Rest, C2, [C1 | Acc]); ins_small_chain(Rest, Chain, Acc) -> lists:reverse(Acc, [Chain | Rest]). - -ins_big_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1>K2 -> +ins_big_chain([{[{K1, _} | _], _} = C1 | Rest], {[{K2, _} | _], _} = C2, Acc) when K1 > K2 -> ins_big_chain(Rest, C2, [C1 | Acc]); ins_big_chain(Rest, Chain, Acc) -> lists:reverse(Acc, [Chain | Rest]). - append_item(Ems, {List, Prev}, Pos, Size) when length(List) >= Size -> {ok, PrevList, _} = couch_file:append_term(Ems#ems.fd, {List, Prev}), {[Pos], PrevList}; append_item(_Ems, {List, Prev}, Pos, _Size) -> {[Pos | List], Prev}. - num_merges(BBChunk, NumBB) when NumBB =< BBChunk -> 0; num_merges(BBChunk, NumBB) when NumBB > BBChunk -> diff --git a/src/couch/src/couch_event_sup.erl b/src/couch/src/couch_event_sup.erl index 32f1b9b68..e9fc2e5db 100644 --- a/src/couch/src/couch_event_sup.erl +++ b/src/couch/src/couch_event_sup.erl @@ -20,8 +20,8 @@ -include_lib("couch/include/couch_db.hrl"). --export([start_link/3,start_link/4, stop/1]). --export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]). +-export([start_link/3, start_link/4, stop/1]). +-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]). % % Instead of calling the @@ -52,10 +52,10 @@ stop(Pid) -> init({EventMgr, EventHandler, Args}) -> case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of - ok -> - {ok, {EventMgr, EventHandler}}; - {stop, Error} -> - {stop, Error} + ok -> + {ok, {EventMgr, EventHandler}}; + {stop, Error} -> + {stop, Error} end. terminate(_Reason, _State) -> diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl index 2948d685b..0e786525f 100644 --- a/src/couch/src/couch_file.erl +++ b/src/couch/src/couch_file.erl @@ -16,10 +16,10 @@ -include_lib("couch/include/couch_db.hrl"). - -define(INITIAL_WAIT, 60000). -define(MONITOR_CHECK, 10000). 
--define(SIZE_BLOCK, 16#1000). % 4 KiB +% 4 KiB +-define(SIZE_BLOCK, 16#1000). -define(IS_OLD_STATE(S), is_pid(S#file.db_monitor)). -define(PREFIX_SIZE, 5). -define(DEFAULT_READ_COUNT, 1024). @@ -66,38 +66,48 @@ open(Filepath) -> open(Filepath, []). open(Filepath, Options) -> - case gen_server:start_link(couch_file, - {Filepath, Options, self(), Ref = make_ref()}, []) of - {ok, Fd} -> - {ok, Fd}; - ignore -> - % get the error - receive - {Ref, Pid, {error, Reason} = Error} -> - case process_info(self(), trap_exit) of - {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end; - {trap_exit, false} -> ok - end, - case {lists:member(nologifmissing, Options), Reason} of - {true, enoent} -> ok; - _ -> - couch_log:error("Could not open file ~s: ~s", - [Filepath, file:format_error(Reason)]) - end, + case + gen_server:start_link( + couch_file, + {Filepath, Options, self(), Ref = make_ref()}, + [] + ) + of + {ok, Fd} -> + {ok, Fd}; + ignore -> + % get the error + receive + {Ref, Pid, {error, Reason} = Error} -> + case process_info(self(), trap_exit) of + {trap_exit, true} -> + receive + {'EXIT', Pid, _} -> ok + end; + {trap_exit, false} -> + ok + end, + case {lists:member(nologifmissing, Options), Reason} of + {true, enoent} -> + ok; + _ -> + couch_log:error( + "Could not open file ~s: ~s", + [Filepath, file:format_error(Reason)] + ) + end, + Error + end; + Error -> + % We can't say much here, because it could be any kind of error. + % Just let it bubble and an encapsulating subcomponent can perhaps + % be more informative. It will likely appear in the SASL log, anyway. Error - end; - Error -> - % We can't say much here, because it could be any kind of error. - % Just let it bubble and an encapsulating subcomponent can perhaps - % be more informative. It will likely appear in the SASL log, anyway. - Error end. - set_db_pid(Fd, Pid) -> gen_server:call(Fd, {set_db_pid, Pid}). - %%---------------------------------------------------------------------- %% Purpose: To append an Erlang term to the end of the file. %% Args: Erlang term to serialize and append to the file. @@ -121,7 +131,6 @@ append_term_md5(Fd, Term, Options) -> Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION), append_binary_md5(Fd, couch_compress:compress(Term, Comp)). - %%---------------------------------------------------------------------- %% Purpose: To append an Erlang binary to the end of the file. %% Args: Erlang binary to append to the file. @@ -134,14 +143,15 @@ append_binary(Fd, Bin) -> ioq:call(Fd, {append_bin, assemble_file_chunk(Bin)}, erlang:get(io_priority)). append_binary_md5(Fd, Bin) -> - ioq:call(Fd, + ioq:call( + Fd, {append_bin, assemble_file_chunk(Bin, couch_hash:md5_hash(Bin))}, - erlang:get(io_priority)). + erlang:get(io_priority) + ). append_raw_chunk(Fd, Chunk) -> ioq:call(Fd, {append_bin, Chunk}, erlang:get(io_priority)). - assemble_file_chunk(Bin) -> [<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin]. @@ -155,12 +165,10 @@ assemble_file_chunk(Bin, Md5) -> %% or {error, Reason}. %%---------------------------------------------------------------------- - pread_term(Fd, Pos) -> {ok, Bin} = pread_binary(Fd, Pos), {ok, couch_compress:decompress(Bin)}. - %%---------------------------------------------------------------------- %% Purpose: Reads a binary from a file that was written with append_binary %% Args: Pos, the offset into the file where the term is serialized. @@ -172,7 +180,6 @@ pread_binary(Fd, Pos) -> {ok, L} = pread_iolist(Fd, Pos), {ok, iolist_to_binary(L)}. 
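A minimal sketch of the append/read API above in use (the path is invented): terms are compressed, appended at a byte position, and read back by that position.

```
%% Hypothetical term round-trip through couch_file.
-module(couch_file_demo).
-export([demo/0]).

demo() ->
    {ok, Fd} = couch_file:open("/tmp/couch_file_demo.dat", [create, overwrite]),
    {ok, Pos, _NumBytesWritten} = couch_file:append_term(Fd, {a_term, [1, 2, 3]}),
    {ok, {a_term, [1, 2, 3]}} = couch_file:pread_term(Fd, Pos),
    ok = couch_file:close(Fd).
```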
- pread_iolist(Fd, Pos) -> case ioq:call(Fd, {pread_iolist, Pos}, erlang:get(io_priority)) of {ok, IoList, Md5} -> @@ -181,49 +188,52 @@ pread_iolist(Fd, Pos) -> Error end. - pread_terms(Fd, PosList) -> {ok, Bins} = pread_binaries(Fd, PosList), - Terms = lists:map(fun(Bin) -> - couch_compress:decompress(Bin) - end, Bins), + Terms = lists:map( + fun(Bin) -> + couch_compress:decompress(Bin) + end, + Bins + ), {ok, Terms}. - pread_binaries(Fd, PosList) -> {ok, Data} = pread_iolists(Fd, PosList), {ok, lists:map(fun erlang:iolist_to_binary/1, Data)}. - pread_iolists(Fd, PosList) -> case ioq:call(Fd, {pread_iolists, PosList}, erlang:get(io_priority)) of {ok, DataMd5s} -> - Data = lists:zipwith(fun(Pos, {IoList, Md5}) -> - verify_md5(Fd, Pos, IoList, Md5) - end, PosList, DataMd5s), + Data = lists:zipwith( + fun(Pos, {IoList, Md5}) -> + verify_md5(Fd, Pos, IoList, Md5) + end, + PosList, + DataMd5s + ), {ok, Data}; Error -> Error end. - append_terms(Fd, Terms) -> append_terms(Fd, Terms, []). - append_terms(Fd, Terms, Options) -> Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION), - Bins = lists:map(fun(Term) -> - couch_compress:compress(Term, Comp) - end, Terms), + Bins = lists:map( + fun(Term) -> + couch_compress:compress(Term, Comp) + end, + Terms + ), append_binaries(Fd, Bins). - append_binaries(Fd, Bins) -> WriteBins = lists:map(fun assemble_file_chunk/1, Bins), ioq:call(Fd, {append_bins, WriteBins}, erlang:get(io_priority)). - %%---------------------------------------------------------------------- %% Purpose: The length of a file, in bytes. %% Returns: {ok, Bytes} @@ -280,36 +290,42 @@ sync(Fd) -> close(Fd) -> gen_server:call(Fd, close, infinity). - delete(RootDir, Filepath) -> delete(RootDir, Filepath, []). delete(RootDir, FullFilePath, Options) -> - EnableRecovery = config:get_boolean("couchdb", - "enable_database_recovery", false), + EnableRecovery = config:get_boolean( + "couchdb", + "enable_database_recovery", + false + ), Async = not lists:member(sync, Options), Context = couch_util:get_value(context, Options, compaction), case Context =:= delete andalso EnableRecovery of true -> rename_file(FullFilePath); false -> - DeleteAfterRename = config:get_boolean("couchdb", - "delete_after_rename", true), + DeleteAfterRename = config:get_boolean( + "couchdb", + "delete_after_rename", + true + ), delete_file(RootDir, FullFilePath, Async, DeleteAfterRename) end. delete_file(RootDir, Filepath, Async, DeleteAfterRename) -> - DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]), + DelFile = filename:join([RootDir, ".delete", ?b2l(couch_uuids:random())]), case file:rename(Filepath, DelFile) of - ok when DeleteAfterRename -> - if (Async) -> - spawn(file, delete, [DelFile]), - ok; - true -> - file:delete(DelFile) - end; - Else -> - Else + ok when DeleteAfterRename -> + if + (Async) -> + spawn(file, delete, [DelFile]), + ok; + true -> + file:delete(DelFile) + end; + Else -> + Else end. rename_file(Original) -> @@ -323,14 +339,21 @@ rename_file(Original) -> deleted_filename(Original) -> {{Y, Mon, D}, {H, Min, S}} = calendar:universal_time(), Suffix = lists:flatten( - io_lib:format(".~w~2.10.0B~2.10.0B." - ++ "~2.10.0B~2.10.0B~2.10.0B.deleted" - ++ filename:extension(Original), [Y, Mon, D, H, Min, S])), + io_lib:format( + ".~w~2.10.0B~2.10.0B." ++ + "~2.10.0B~2.10.0B~2.10.0B.deleted" ++ + filename:extension(Original), + [Y, Mon, D, H, Min, S] + ) + ), filename:rootname(Original) ++ Suffix. 
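To make the recovery rename above concrete: deleted_filename/1 keeps the rootname and extension and splices a UTC timestamp in between. Illustrative only, since the function is internal to the module and the timestamp here is invented:

```
%% deleted_filename("shards/00000000-1fffffff/db.1458336317.couch")
%% with a hypothetical UTC time of 2021-11-20 06:00:08 would yield
%%   "shards/00000000-1fffffff/db.1458336317.20211120.060008.deleted.couch"
```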
nuke_dir(RootDelDir, Dir) -> - EnableRecovery = config:get_boolean("couchdb", - "enable_database_recovery", false), + EnableRecovery = config:get_boolean( + "couchdb", + "enable_database_recovery", + false + ), case EnableRecovery of true -> rename_file(Dir); @@ -339,8 +362,11 @@ nuke_dir(RootDelDir, Dir) -> end. delete_dir(RootDelDir, Dir) -> - DeleteAfterRename = config:get_boolean("couchdb", - "delete_after_rename", true), + DeleteAfterRename = config:get_boolean( + "couchdb", + "delete_after_rename", + true + ), FoldFun = fun(File) -> Path = Dir ++ "/" ++ File, case filelib:is_dir(Path) of @@ -359,27 +385,30 @@ delete_dir(RootDelDir, Dir) -> ok end. - init_delete_dir(RootDir) -> - Dir = filename:join(RootDir,".delete"), + Dir = filename:join(RootDir, ".delete"), % note: ensure_dir requires an actual filename component, which is the % reason for "foo". - filelib:ensure_dir(filename:join(Dir,"foo")), + filelib:ensure_dir(filename:join(Dir, "foo")), spawn(fun() -> - filelib:fold_files(Dir, ".*", true, + filelib:fold_files( + Dir, + ".*", + true, fun(Filename, _) -> ok = file:delete(Filename) - end, ok) + end, + ok + ) end), ok. - read_header(Fd) -> case ioq:call(Fd, find_header, erlang:get(io_priority)) of - {ok, Bin} -> - {ok, binary_to_term(Bin)}; - Else -> - Else + {ok, Bin} -> + {ok, binary_to_term(Bin)}; + Else -> + Else end. write_header(Fd, Data) -> @@ -389,17 +418,14 @@ write_header(Fd, Data) -> FinalBin = <<Md5/binary, Bin/binary>>, ioq:call(Fd, {write_header, FinalBin}, erlang:get(io_priority)). - init_status_error(ReturnPid, Ref, Error) -> ReturnPid ! {Ref, self(), Error}, ignore. - last_read(Fd) when is_pid(Fd) -> Now = os:timestamp(), couch_util:process_dict_get(Fd, read_timestamp, Now). - % server functions init({Filepath, Options, ReturnPid, Ref}) -> @@ -408,66 +434,67 @@ init({Filepath, Options, ReturnPid, Ref}) -> IsSys = lists:member(sys_db, Options), update_read_timestamp(), case lists:member(create, Options) of - true -> - filelib:ensure_dir(Filepath), - case file:open(Filepath, OpenOptions) of - {ok, Fd} -> - %% Save Fd in process dictionary for debugging purposes - put(couch_file_fd, {Fd, Filepath}), - {ok, Length} = file:position(Fd, eof), - case Length > 0 of - true -> - % this means the file already exists and has data. - % FYI: We don't differentiate between empty files and non-existent - % files here. - case lists:member(overwrite, Options) of - true -> - {ok, 0} = file:position(Fd, 0), - ok = file:truncate(Fd), - ok = file:sync(Fd), - maybe_track_open_os_files(Options), - erlang:send_after(?INITIAL_WAIT, self(), maybe_close), - {ok, #file{fd=Fd, is_sys=IsSys, pread_limit=Limit}}; - false -> - ok = file:close(Fd), - init_status_error(ReturnPid, Ref, {error, eexist}) - end; - false -> - maybe_track_open_os_files(Options), - erlang:send_after(?INITIAL_WAIT, self(), maybe_close), - {ok, #file{fd=Fd, is_sys=IsSys, pread_limit=Limit}} - end; - Error -> - init_status_error(ReturnPid, Ref, Error) - end; - false -> - % open in read mode first, so we don't create the file if it doesn't exist. 
- case file:open(Filepath, [read, raw]) of - {ok, Fd_Read} -> + true -> + filelib:ensure_dir(Filepath), case file:open(Filepath, OpenOptions) of {ok, Fd} -> - %% Save Fd in process dictionary for debugging purposes - put(couch_file_fd, {Fd, Filepath}), - ok = file:close(Fd_Read), - maybe_track_open_os_files(Options), - {ok, Eof} = file:position(Fd, eof), - erlang:send_after(?INITIAL_WAIT, self(), maybe_close), - {ok, #file{fd=Fd, eof=Eof, is_sys=IsSys, pread_limit=Limit}}; - Error -> - init_status_error(ReturnPid, Ref, Error) + %% Save Fd in process dictionary for debugging purposes + put(couch_file_fd, {Fd, Filepath}), + {ok, Length} = file:position(Fd, eof), + case Length > 0 of + true -> + % this means the file already exists and has data. + % FYI: We don't differentiate between empty files and non-existent + % files here. + case lists:member(overwrite, Options) of + true -> + {ok, 0} = file:position(Fd, 0), + ok = file:truncate(Fd), + ok = file:sync(Fd), + maybe_track_open_os_files(Options), + erlang:send_after(?INITIAL_WAIT, self(), maybe_close), + {ok, #file{fd = Fd, is_sys = IsSys, pread_limit = Limit}}; + false -> + ok = file:close(Fd), + init_status_error(ReturnPid, Ref, {error, eexist}) + end; + false -> + maybe_track_open_os_files(Options), + erlang:send_after(?INITIAL_WAIT, self(), maybe_close), + {ok, #file{fd = Fd, is_sys = IsSys, pread_limit = Limit}} + end; + Error -> + init_status_error(ReturnPid, Ref, Error) end; - Error -> - init_status_error(ReturnPid, Ref, Error) - end + false -> + % open in read mode first, so we don't create the file if it doesn't exist. + case file:open(Filepath, [read, raw]) of + {ok, Fd_Read} -> + case file:open(Filepath, OpenOptions) of + {ok, Fd} -> + %% Save Fd in process dictionary for debugging purposes + put(couch_file_fd, {Fd, Filepath}), + ok = file:close(Fd_Read), + maybe_track_open_os_files(Options), + {ok, Eof} = file:position(Fd, eof), + erlang:send_after(?INITIAL_WAIT, self(), maybe_close), + {ok, #file{fd = Fd, eof = Eof, is_sys = IsSys, pread_limit = Limit}}; + Error -> + init_status_error(ReturnPid, Ref, Error) + end; + Error -> + init_status_error(ReturnPid, Ref, Error) + end end. file_open_options(Options) -> - [read, raw, binary] ++ case lists:member(read_only, Options) of - true -> - []; - false -> - [append] - end. + [read, raw, binary] ++ + case lists:member(read_only, Options) of + true -> + []; + false -> + [append] + end. 
maybe_track_open_os_files(Options) -> case not lists:member(sys_db, Options) of @@ -484,59 +511,62 @@ terminate(_Reason, #file{fd = Fd}) -> handle_call(Msg, From, File) when ?IS_OLD_STATE(File) -> handle_call(Msg, From, upgrade_state(File)); - -handle_call(close, _From, #file{fd=Fd}=File) -> +handle_call(close, _From, #file{fd = Fd} = File) -> {stop, normal, file:close(Fd), File#file{fd = nil}}; - handle_call({pread_iolist, Pos}, _From, File) -> update_read_timestamp(), {LenIolist, NextPos} = read_raw_iolist_int(File, Pos, 4), case iolist_to_binary(LenIolist) of - <<1:1/integer,Len:31/integer>> -> % an MD5-prefixed term - {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len+16), - {Md5, IoList} = extract_md5(Md5AndIoList), - {reply, {ok, IoList, Md5}, File}; - <<0:1/integer,Len:31/integer>> -> - {Iolist, _} = read_raw_iolist_int(File, NextPos, Len), - {reply, {ok, Iolist, <<>>}, File} + % an MD5-prefixed term + <<1:1/integer, Len:31/integer>> -> + {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len + 16), + {Md5, IoList} = extract_md5(Md5AndIoList), + {reply, {ok, IoList, Md5}, File}; + <<0:1/integer, Len:31/integer>> -> + {Iolist, _} = read_raw_iolist_int(File, NextPos, Len), + {reply, {ok, Iolist, <<>>}, File} end; - handle_call({pread_iolists, PosL}, _From, File) -> update_read_timestamp(), LocNums1 = [{Pos, 4} || Pos <- PosL], DataSizes = read_multi_raw_iolists_int(File, LocNums1), - LocNums2 = lists:map(fun({LenIoList, NextPos}) -> - case iolist_to_binary(LenIoList) of - <<1:1/integer, Len:31/integer>> -> % an MD5-prefixed term - {NextPos, Len + 16}; - <<0:1/integer, Len:31/integer>> -> - {NextPos, Len} - end - end, DataSizes), + LocNums2 = lists:map( + fun({LenIoList, NextPos}) -> + case iolist_to_binary(LenIoList) of + % an MD5-prefixed term + <<1:1/integer, Len:31/integer>> -> + {NextPos, Len + 16}; + <<0:1/integer, Len:31/integer>> -> + {NextPos, Len} + end + end, + DataSizes + ), Resps = read_multi_raw_iolists_int(File, LocNums2), - Extracted = lists:zipwith(fun({LenIoList, _}, {IoList, _}) -> - case iolist_to_binary(LenIoList) of - <<1:1/integer, _:31/integer>> -> - {Md5, IoList} = extract_md5(IoList), - {IoList, Md5}; - <<0:1/integer, _:31/integer>> -> - {IoList, <<>>} - end - end, DataSizes, Resps), + Extracted = lists:zipwith( + fun({LenIoList, _}, {IoList, _}) -> + case iolist_to_binary(LenIoList) of + <<1:1/integer, _:31/integer>> -> + {Md5, IoList} = extract_md5(IoList), + {IoList, Md5}; + <<0:1/integer, _:31/integer>> -> + {IoList, <<>>} + end + end, + DataSizes, + Resps + ), {reply, {ok, Extracted}, File}; - handle_call(bytes, _From, #file{fd = Fd} = File) -> {reply, file:position(Fd, eof), File}; - -handle_call({set_db_pid, Pid}, _From, #file{db_monitor=OldRef}=File) -> +handle_call({set_db_pid, Pid}, _From, #file{db_monitor = OldRef} = File) -> case is_reference(OldRef) of true -> demonitor(OldRef, [flush]); false -> ok end, Ref = monitor(process, Pid), - {reply, ok, File#file{db_monitor=Ref}}; - -handle_call(sync, _From, #file{fd=Fd}=File) -> + {reply, ok, File#file{db_monitor = Ref}}; +handle_call(sync, _From, #file{fd = Fd} = File) -> case file:sync(Fd) of ok -> {reply, ok, File}; @@ -547,68 +577,66 @@ handle_call(sync, _From, #file{fd=Fd}=File) -> % can't fathom. 
{stop, Error, Error, #file{fd = nil}} end; - -handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) -> +handle_call({truncate, Pos}, _From, #file{fd = Fd} = File) -> {ok, Pos} = file:position(Fd, Pos), case file:truncate(Fd) of - ok -> - {reply, ok, File#file{eof = Pos}}; - Error -> - {reply, Error, File} + ok -> + {reply, ok, File#file{eof = Pos}}; + Error -> + {reply, Error, File} end; - handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) -> Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin), Size = iolist_size(Blocks), case file:write(Fd, Blocks) of - ok -> - {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}}; - Error -> - {reply, Error, reset_eof(File)} + ok -> + {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}}; + Error -> + {reply, Error, reset_eof(File)} end; - handle_call({append_bins, Bins}, _From, #file{fd = Fd, eof = Pos} = File) -> - {BlockResps, FinalPos} = lists:mapfoldl(fun(Bin, PosAcc) -> - Blocks = make_blocks(PosAcc rem ?SIZE_BLOCK, Bin), - Size = iolist_size(Blocks), - {{Blocks, {PosAcc, Size}}, PosAcc + Size} - end, Pos, Bins), + {BlockResps, FinalPos} = lists:mapfoldl( + fun(Bin, PosAcc) -> + Blocks = make_blocks(PosAcc rem ?SIZE_BLOCK, Bin), + Size = iolist_size(Blocks), + {{Blocks, {PosAcc, Size}}, PosAcc + Size} + end, + Pos, + Bins + ), {AllBlocks, Resps} = lists:unzip(BlockResps), case file:write(Fd, AllBlocks) of - ok -> - {reply, {ok, Resps}, File#file{eof = FinalPos}}; - Error -> - {reply, Error, reset_eof(File)} + ok -> + {reply, {ok, Resps}, File#file{eof = FinalPos}}; + Error -> + {reply, Error, reset_eof(File)} end; - handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) -> BinSize = byte_size(Bin), case Pos rem ?SIZE_BLOCK of - 0 -> - Padding = <<>>; - BlockOffset -> - Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>> + 0 -> + Padding = <<>>; + BlockOffset -> + Padding = <<0:(8 * (?SIZE_BLOCK - BlockOffset))>> end, FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])], case file:write(Fd, FinalBin) of - ok -> - {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}}; - Error -> - {reply, Error, reset_eof(File)} + ok -> + {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}}; + Error -> + {reply, Error, reset_eof(File)} end; - handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) -> {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}. handle_cast(close, Fd) -> - {stop,normal,Fd}. + {stop, normal, Fd}. code_change(_OldVsn, State, _Extra) -> {ok, State}. handle_info(Msg, File) when ?IS_OLD_STATE(File) -> handle_info(Msg, upgrade_state(File)); - handle_info(maybe_close, File) -> case is_idle(File) of true -> @@ -617,8 +645,7 @@ handle_info(maybe_close, File) -> erlang:send_after(?MONITOR_CHECK, self(), maybe_close), {noreply, File} end; - -handle_info({'DOWN', Ref, process, _Pid, _Info}, #file{db_monitor=Ref}=File) -> +handle_info({'DOWN', Ref, process, _Pid, _Info}, #file{db_monitor = Ref} = File) -> case is_idle(File) of true -> {stop, normal, File}; false -> {noreply, File} @@ -630,12 +657,13 @@ format_status(_Opt, [PDict, #file{} = File]) -> find_header(Fd, Block) -> case (catch load_header(Fd, Block)) of - {ok, Bin} -> - {ok, Bin}; - _Error -> - ReadCount = config:get_integer( - "couchdb", "find_header_read_count", ?DEFAULT_READ_COUNT), - find_header(Fd, Block -1, ReadCount) + {ok, Bin} -> + {ok, Bin}; + _Error -> + ReadCount = config:get_integer( + "couchdb", "find_header_read_count", ?DEFAULT_READ_COUNT + ), + find_header(Fd, Block - 1, ReadCount) end. 
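A small sketch of the header API whose backward scan appears above, assuming an already-open couch_file process Fd: headers are md5-prefixed and block-aligned, and find_header walks back from EOF to the newest valid one.

```
%% Hypothetical header round-trip.
-module(header_demo).
-export([demo/1]).

demo(Fd) ->
    ok = couch_file:write_header(Fd, {my_header, 42}),
    {ok, {my_header, 42}} = couch_file:read_header(Fd).
```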
load_header(Fd, Block) -> @@ -648,22 +676,22 @@ load_header(Fd, Pos, HeaderLen) -> load_header(Fd, Pos, HeaderLen, RestBlock) -> TotalBytes = calculate_total_read_len(?PREFIX_SIZE, HeaderLen), - RawBin = case TotalBytes =< byte_size(RestBlock) of - true -> - <<RawBin0:TotalBytes/binary, _/binary>> = RestBlock, - RawBin0; - false -> - ReadStart = Pos + ?PREFIX_SIZE + byte_size(RestBlock), - ReadLen = TotalBytes - byte_size(RestBlock), - {ok, Missing} = file:pread(Fd, ReadStart, ReadLen), - <<RestBlock/binary, Missing/binary>> - end, + RawBin = + case TotalBytes =< byte_size(RestBlock) of + true -> + <<RawBin0:TotalBytes/binary, _/binary>> = RestBlock, + RawBin0; + false -> + ReadStart = Pos + ?PREFIX_SIZE + byte_size(RestBlock), + ReadLen = TotalBytes - byte_size(RestBlock), + {ok, Missing} = file:pread(Fd, ReadStart, ReadLen), + <<RestBlock/binary, Missing/binary>> + end, <<Md5Sig:16/binary, HeaderBin/binary>> = iolist_to_binary(remove_block_prefixes(?PREFIX_SIZE, RawBin)), Md5Sig = couch_hash:md5_hash(HeaderBin), {ok, HeaderBin}. - %% Read multiple block locations using a single file:pread/2. -spec find_header(file:fd(), block_id(), non_neg_integer()) -> {ok, binary()} | no_valid_header. @@ -671,23 +699,28 @@ find_header(_Fd, Block, _ReadCount) when Block < 0 -> no_valid_header; find_header(Fd, Block, ReadCount) -> FirstBlock = max(0, Block - ReadCount + 1), - BlockLocations = [?SIZE_BLOCK*B || B <- lists:seq(FirstBlock, Block)], + BlockLocations = [?SIZE_BLOCK * B || B <- lists:seq(FirstBlock, Block)], {ok, DataL} = file:pread(Fd, [{L, ?PREFIX_SIZE} || L <- BlockLocations]), %% Since BlockLocations are ordered from oldest to newest, we rely %% on lists:foldl/3 to reverse the order, making HeaderLocations %% correctly ordered from newest to oldest. - HeaderLocations = lists:foldl(fun - ({Loc, <<1, HeaderSize:32/integer>>}, Acc) -> - [{Loc, HeaderSize} | Acc]; - (_, Acc) -> - Acc - end, [], lists:zip(BlockLocations, DataL)), + HeaderLocations = lists:foldl( + fun + ({Loc, <<1, HeaderSize:32/integer>>}, Acc) -> + [{Loc, HeaderSize} | Acc]; + (_, Acc) -> + Acc + end, + [], + lists:zip(BlockLocations, DataL) + ), case find_newest_header(Fd, HeaderLocations) of {ok, _Location, HeaderBin} -> {ok, HeaderBin}; _ -> ok = file:advise( - Fd, hd(BlockLocations), ReadCount * ?SIZE_BLOCK, dont_need), + Fd, hd(BlockLocations), ReadCount * ?SIZE_BLOCK, dont_need + ), NextBlock = hd(BlockLocations) div ?SIZE_BLOCK - 1, find_header(Fd, NextBlock, ReadCount) end. @@ -704,10 +737,10 @@ find_newest_header(Fd, [{Location, Size} | LocationSizes]) -> find_newest_header(Fd, LocationSizes) end. - --spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) -> - {Data::iolist(), CurPos::non_neg_integer()}. -read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE +-spec read_raw_iolist_int(#file{}, Pos :: non_neg_integer(), Len :: non_neg_integer()) -> + {Data :: iolist(), CurPos :: non_neg_integer()}. 
+% 0110 UPGRADE CODE +read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> read_raw_iolist_int(Fd, Pos, Len); read_raw_iolist_int(#file{fd = Fd} = File, Pos, Len) -> {Pos, TotalBytes} = get_pread_locnum(File, Pos, Len), @@ -727,33 +760,38 @@ read_raw_iolist_int(#file{fd = Fd} = File, Pos, Len) -> % TODO: check if this is really unused read_multi_raw_iolists_int(#file{fd = Fd} = File, PosLens) -> - LocNums = lists:map(fun({Pos, Len}) -> - get_pread_locnum(File, Pos, Len) - end, PosLens), + LocNums = lists:map( + fun({Pos, Len}) -> + get_pread_locnum(File, Pos, Len) + end, + PosLens + ), {ok, Bins} = file:pread(Fd, LocNums), - lists:zipwith(fun({Pos, TotalBytes}, Bin) -> - <<RawBin:TotalBytes/binary>> = Bin, - {remove_block_prefixes(Pos rem ?SIZE_BLOCK, RawBin), Pos + TotalBytes} - end, LocNums, Bins). - + lists:zipwith( + fun({Pos, TotalBytes}, Bin) -> + <<RawBin:TotalBytes/binary>> = Bin, + {remove_block_prefixes(Pos rem ?SIZE_BLOCK, RawBin), Pos + TotalBytes} + end, + LocNums, + Bins + ). get_pread_locnum(File, Pos, Len) -> BlockOffset = Pos rem ?SIZE_BLOCK, TotalBytes = calculate_total_read_len(BlockOffset, Len), case Pos + TotalBytes of - Size when Size > File#file.eof -> - couch_stats:increment_counter([pread, exceed_eof]), - {_Fd, Filepath} = get(couch_file_fd), - throw({read_beyond_eof, Filepath}); - Size when Size > File#file.pread_limit -> - couch_stats:increment_counter([pread, exceed_limit]), - {_Fd, Filepath} = get(couch_file_fd), - throw({exceed_pread_limit, Filepath, File#file.pread_limit}); - _ -> - {Pos, TotalBytes} + Size when Size > File#file.eof -> + couch_stats:increment_counter([pread, exceed_eof]), + {_Fd, Filepath} = get(couch_file_fd), + throw({read_beyond_eof, Filepath}); + Size when Size > File#file.pread_limit -> + couch_stats:increment_counter([pread, exceed_limit]), + {_Fd, Filepath} = get(couch_file_fd), + throw({exceed_pread_limit, Filepath, File#file.pread_limit}); + _ -> + {Pos, TotalBytes} end. - -spec extract_md5(iolist()) -> {binary(), iolist()}. extract_md5(FullIoList) -> {Md5List, IoList} = split_iolist(FullIoList, 16, []), @@ -763,26 +801,28 @@ calculate_total_read_len(0, FinalLen) -> calculate_total_read_len(1, FinalLen) + 1; calculate_total_read_len(BlockOffset, FinalLen) -> case ?SIZE_BLOCK - BlockOffset of - BlockLeft when BlockLeft >= FinalLen -> - FinalLen; - BlockLeft -> - FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) + - if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0; - true -> 1 end + BlockLeft when BlockLeft >= FinalLen -> + FinalLen; + BlockLeft -> + FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK - 1)) + + if + ((FinalLen - BlockLeft) rem (?SIZE_BLOCK - 1)) =:= 0 -> 0; + true -> 1 + end end. remove_block_prefixes(_BlockOffset, <<>>) -> []; -remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) -> +remove_block_prefixes(0, <<_BlockPrefix, Rest/binary>>) -> remove_block_prefixes(1, Rest); remove_block_prefixes(BlockOffset, Bin) -> BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset, case size(Bin) of - Size when Size > BlockBytesAvailable -> - <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin, - [DataBlock | remove_block_prefixes(0, Rest)]; - _Size -> - [Bin] + Size when Size > BlockBytesAvailable -> + <<DataBlock:BlockBytesAvailable/binary, Rest/binary>> = Bin, + [DataBlock | remove_block_prefixes(0, Rest)]; + _Size -> + [Bin] end. 
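A worked example of the block arithmetic above; calculate_total_read_len/2 is module-internal, so these expressions are illustrative (e.g. evaluated from a test inside couch_file). Every 4 KiB block starts with a one-byte prefix, so a read pays one extra byte per block it touches:

```
%% A 5000-byte value starting on a block boundary spans two blocks and
%% therefore costs two prefix bytes on disk:
5002 = calculate_total_read_len(0, 5000),
%% while a value that exactly fills the remainder of a block pays no extra:
4095 = calculate_total_read_len(1, 4095).
```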
make_blocks(_BlockOffset, []) -> @@ -791,16 +831,16 @@ make_blocks(0, IoList) -> [<<0>> | make_blocks(1, IoList)]; make_blocks(BlockOffset, IoList) -> case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of - {Begin, End} -> - [Begin | make_blocks(0, End)]; - _SplitRemaining -> - IoList + {Begin, End} -> + [Begin | make_blocks(0, End)]; + _SplitRemaining -> + IoList end. %% @doc Returns a tuple where the first element contains the leading SplitAt %% bytes of the original iolist, and the 2nd element is the tail. If SplitAt %% is larger than byte_size(IoList), return the difference. --spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) -> +-spec split_iolist(IoList :: iolist(), SplitAt :: non_neg_integer(), Acc :: list()) -> {iolist(), iolist()} | non_neg_integer(). split_iolist(List, 0, BeginAcc) -> {lists:reverse(BeginAcc), List}; @@ -809,14 +849,14 @@ split_iolist([], SplitAt, _BeginAcc) -> split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) -> split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]); split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) -> - <<Begin:SplitAt/binary,End/binary>> = Bin, + <<Begin:SplitAt/binary, End/binary>> = Bin, split_iolist([End | Rest], 0, [Begin | BeginAcc]); -split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) -> +split_iolist([Sublist | Rest], SplitAt, BeginAcc) when is_list(Sublist) -> case split_iolist(Sublist, SplitAt, BeginAcc) of - {Begin, End} -> - {Begin, [End | Rest]}; - SplitRemaining -> - split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc]) + {Begin, End} -> + {Begin, [End | Rest]}; + SplitRemaining -> + split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc]) end; split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) -> split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]). @@ -825,29 +865,25 @@ monitored_by_pids() -> {monitored_by, PidsAndRefs} = process_info(self(), monitored_by), lists:filter(fun is_pid/1, PidsAndRefs). - verify_md5(_Fd, _Pos, IoList, <<>>) -> IoList; - verify_md5(Fd, Pos, IoList, Md5) -> case couch_hash:md5_hash(IoList) of Md5 -> IoList; _ -> report_md5_error(Fd, Pos) end. - report_md5_error(Fd, Pos) -> couch_log:emergency("File corruption in ~p at position ~B", [Fd, Pos]), exit({file_corruption, <<"file corruption">>}). - % System dbs aren't monitored by couch_stats_process_tracker -is_idle(#file{is_sys=true}) -> +is_idle(#file{is_sys = true}) -> case monitored_by_pids() of [] -> true; _ -> false end; -is_idle(#file{is_sys=false}) -> +is_idle(#file{is_sys = false}) -> Tracker = whereis(couch_stats_process_tracker), case monitored_by_pids() of [] -> true; @@ -865,10 +901,10 @@ process_info(Pid) -> update_read_timestamp() -> put(read_timestamp, os:timestamp()). -upgrade_state(#file{db_monitor=DbPid}=File) when is_pid(DbPid) -> +upgrade_state(#file{db_monitor = DbPid} = File) when is_pid(DbPid) -> unlink(DbPid), Ref = monitor(process, DbPid), - File#file{db_monitor=Ref}; + File#file{db_monitor = Ref}; upgrade_state(State) -> State. @@ -889,21 +925,26 @@ reset_eof(#file{} = File) -> deleted_filename_test_() -> DbNames = ["dbname", "db.name", "user/dbname"], Fixtures = make_filename_fixtures(DbNames), - lists:map(fun(Fixture) -> - should_create_proper_deleted_filename(Fixture) - end, Fixtures). + lists:map( + fun(Fixture) -> + should_create_proper_deleted_filename(Fixture) + end, + Fixtures + ). 
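The deleted_filename_test_/0 fixtures above feed the helper below, which checks that deleted_filename/1 splices a datestamped ".deleted" marker in ahead of the original extension. Roughly, with a hypothetical timestamp:

```
%% "/srv/data/dbname.1458336317.couch"
%%   => "/srv/data/dbname.1458336317.20211120.010008.deleted.couch"
```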
should_create_proper_deleted_filename(Before) -> {Before, - ?_test(begin - BeforeExtension = filename:extension(Before), - BeforeBasename = filename:basename(Before, BeforeExtension), - Re = "^" ++ BeforeBasename ++ "\.[0-9]{8}\.[0-9]{6}\.deleted\..*$", - After = deleted_filename(Before), - ?assertEqual(match, - re:run(filename:basename(After), Re, [{capture, none}])), - ?assertEqual(BeforeExtension, filename:extension(After)) - end)}. + ?_test(begin + BeforeExtension = filename:extension(Before), + BeforeBasename = filename:basename(Before, BeforeExtension), + Re = "^" ++ BeforeBasename ++ "\.[0-9]{8}\.[0-9]{6}\.deleted\..*$", + After = deleted_filename(Before), + ?assertEqual( + match, + re:run(filename:basename(After), Re, [{capture, none}]) + ), + ?assertEqual(BeforeExtension, filename:extension(After)) + end)}. make_filename_fixtures(DbNames) -> Formats = [ @@ -912,12 +953,18 @@ make_filename_fixtures(DbNames) -> "shards/00000000-1fffffff/~s.1458336317.couch", ".shards/00000000-1fffffff/~s.1458336317_design", ".shards/00000000-1fffffff/~s.1458336317_design" - "/mrview/3133e28517e89a3e11435dd5ac4ad85a.view" + "/mrview/3133e28517e89a3e11435dd5ac4ad85a.view" ], - lists:flatmap(fun(DbName) -> - lists:map(fun(Format) -> - filename:join("/srv/data", io_lib:format(Format, [DbName])) - end, Formats) - end, DbNames). + lists:flatmap( + fun(DbName) -> + lists:map( + fun(Format) -> + filename:join("/srv/data", io_lib:format(Format, [DbName])) + end, + Formats + ) + end, + DbNames + ). -endif. diff --git a/src/couch/src/couch_flags.erl b/src/couch/src/couch_flags.erl index 5cfe7f6d1..42d585f2e 100644 --- a/src/couch/src/couch_flags.erl +++ b/src/couch/src/couch_flags.erl @@ -64,13 +64,13 @@ -include_lib("mem3/include/mem3.hrl"). -include("couch_db_int.hrl"). --type subject() - :: #db{} - | #httpd{} - | #shard{} - | #ordered_shard{} - | string() - | binary(). +-type subject() :: + #db{} + | #httpd{} + | #shard{} + | #ordered_shard{} + | string() + | binary(). -define(SERVICE_ID, feature_flags). @@ -79,8 +79,10 @@ enabled(Subject) -> Key = maybe_handle(subject_key, [Subject], fun subject_key/1), Handle = couch_epi:get_handle({flags, config}), - lists:usort(enabled(Handle, {<<"/", Key/binary>>}) - ++ enabled(Handle, {couch_db:normalize_dbname(Key)})). + lists:usort( + enabled(Handle, {<<"/", Key/binary>>}) ++ + enabled(Handle, {couch_db:normalize_dbname(Key)}) + ). -spec is_enabled(FlagId :: atom(), subject()) -> boolean(). @@ -106,9 +108,9 @@ enabled(Handle, Key) -> subject_key(#db{name = Name}) -> subject_key(Name); -subject_key(#httpd{path_parts=[Name | _Rest]}) -> +subject_key(#httpd{path_parts = [Name | _Rest]}) -> subject_key(Name); -subject_key(#httpd{path_parts=[]}) -> +subject_key(#httpd{path_parts = []}) -> <<>>; subject_key(#shard{name = Name}) -> subject_key(Name); @@ -120,9 +122,10 @@ subject_key(Name) when is_binary(Name) -> Name. -spec maybe_handle( - Function :: atom(), - Args :: [term()], - Default :: fun((Args :: [term()]) -> term())) -> + Function :: atom(), + Args :: [term()], + Default :: fun((Args :: [term()]) -> term()) +) -> term(). maybe_handle(Func, Args, Default) -> diff --git a/src/couch/src/couch_flags_config.erl b/src/couch/src/couch_flags_config.erl index 104a48257..a50f4411f 100644 --- a/src/couch/src/couch_flags_config.erl +++ b/src/couch/src/couch_flags_config.erl @@ -28,23 +28,26 @@ -define(DATA_INTERVAL, 1000). -define(MAX_FLAG_NAME_LENGTH, 256). --type pattern() - :: binary(). 
%% non empty binary which optionally can end with * +-type pattern() :: + %% non empty binary which optionally can end with * + binary(). -type flag_id() :: atom(). -type flags() :: list(flag_id()). --type parse_pattern() - :: { - binary(), %% pattern without trailing * if it is present - pattern(), - IsWildCard :: boolean(), %% true if the pattern has trailing * - PatternSize :: pos_integer() - }. +-type parse_pattern() :: + { + %% pattern without trailing * if it is present + binary(), + pattern(), + %% true if the pattern has trailing * + IsWildCard :: boolean(), + PatternSize :: pos_integer() + }. --type rule() - :: { +-type rule() :: + { parse_pattern(), EnabledFlags :: flags(), DisabledFlags :: flags() @@ -75,26 +78,29 @@ data() -> data(Config) -> ByPattern = collect_rules(Config), - lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]). + lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]). -spec parse_rules([{Key :: string(), Value :: string()}]) -> [rule()]. parse_rules(Config) -> - lists:filtermap(fun({K, V}) -> - case parse_rule(K, V) of - {error, {Format, Args}} -> - couch_log:error(Format, Args), - false; - Rule -> - {true, Rule} - end - end, Config). + lists:filtermap( + fun({K, V}) -> + case parse_rule(K, V) of + {error, {Format, Args}} -> + couch_log:error(Format, Args), + false; + Rule -> + {true, Rule} + end + end, + Config + ). -spec parse_rule(Key :: string(), Value :: string()) -> rule() | {error, Reason :: term()}. -parse_rule(Key, "true") -> +parse_rule(Key, "true") -> parse_flags(binary:split(list_to_binary(Key), <<"||">>), true); parse_rule(Key, "false") -> parse_flags(binary:split(list_to_binary(Key), <<"||">>), false); @@ -119,29 +125,32 @@ parse_flags([FlagsBin, PatternBin], Value) -> end; parse_flags(_Tokens, _) -> couch_log:error( - "Key should be in the form of `[flags]||pattern` (got ~s)", []), + "Key should be in the form of `[flags]||pattern` (got ~s)", [] + ), false. -spec parse_flags_term(Flags :: binary()) -> [flag_id()] | {error, Reason :: term()}. parse_flags_term(FlagsBin) -> - {Flags, Errors} = lists:splitwith(fun erlang:is_atom/1, - [parse_flag(F) || F <- split_by_comma(FlagsBin)]), + {Flags, Errors} = lists:splitwith( + fun erlang:is_atom/1, + [parse_flag(F) || F <- split_by_comma(FlagsBin)] + ), case Errors of - [] -> - lists:usort(Flags); - _ -> - {error, { - "Cannot parse list of tags: ~n~p", - Errors - }} + [] -> + lists:usort(Flags); + _ -> + {error, { + "Cannot parse list of tags: ~n~p", + Errors + }} end. split_by_comma(Binary) -> case binary:split(Binary, <<",">>, [global]) of - [<<>>] -> []; - Tokens -> Tokens + [<<>>] -> []; + Tokens -> Tokens end. parse_flag(FlagName) when size(FlagName) > ?MAX_FLAG_NAME_LENGTH -> @@ -149,7 +158,7 @@ parse_flag(FlagName) -> FlagNameS = string:strip(binary_to_list(FlagName)), try - list_to_existing_atom(FlagNameS) + list_to_existing_atom(FlagNameS) catch _:_ -> {invalid_flag, FlagName} end. @@ -172,8 +181,10 @@ parse_pattern(PatternBin) -> collect_rules(ConfigData) -> ByKey = by_key(parse_rules(ConfigData)), Keys = lists:sort(fun sort_by_length/2, gb_trees:keys(ByKey)), - FuzzyKeys = lists:sort(fun sort_by_length/2, - [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)]), + FuzzyKeys = lists:sort( + fun sort_by_length/2, + [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)] + ), Rules = collect_rules(lists:reverse(Keys), FuzzyKeys, ByKey), gb_trees:to_list(Rules).
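For context on the rules being parsed here: each key in the feature_flags config section is a comma-separated flag list, a `||` separator, and a pattern that may end in a `*` wildcard, with a true/false value. A sketch (the flag and database names are illustrative only):

```
[feature_flags]
; enable `partitioned` for every database whose name starts with "db"
partitioned||db* = true
; ...but keep it disabled for one specific database
partitioned||db/important = false
```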
@@ -185,17 +196,22 @@ sort_by_length(A, B) -> -spec by_key(Items :: [rule()]) -> Dictionary :: gb_trees:tree(). by_key(Items) -> - lists:foldl(fun({{_, K, _, _}, _, _} = Item, Acc) -> - update_element(Acc, K, Item, fun(Value) -> - update_flags(Value, Item) - end) - end, gb_trees:empty(), Items). + lists:foldl( + fun({{_, K, _, _}, _, _} = Item, Acc) -> + update_element(Acc, K, Item, fun(Value) -> + update_flags(Value, Item) + end) + end, + gb_trees:empty(), + Items + ). -spec update_element( - Tree :: gb_trees:tree(), - Key :: pattern(), - Default :: rule(), - Fun :: fun((Item :: rule()) -> rule())) -> + Tree :: gb_trees:tree(), + Key :: pattern(), + Default :: rule(), + Fun :: fun((Item :: rule()) -> rule()) +) -> gb_trees:tree(). update_element(Tree, Key, Default, Fun) -> @@ -207,9 +223,10 @@ update_element(Tree, Key, Default, Fun) -> end. -spec collect_rules( - Keys :: [pattern()], - FuzzyKeys :: [pattern()], - ByKey :: gb_trees:tree()) -> + Keys :: [pattern()], + FuzzyKeys :: [pattern()], + ByKey :: gb_trees:tree() +) -> gb_trees:tree(). collect_rules([], _, Acc) -> @@ -218,9 +235,10 @@ collect_rules([Current | Rest], Items, Acc) -> collect_rules(Rest, Items -- [Current], inherit_flags(Current, Items, Acc)). -spec inherit_flags( - Current :: pattern(), - FuzzyKeys :: [pattern()], - ByKey :: gb_trees:tree()) -> + Current :: pattern(), + FuzzyKeys :: [pattern()], + ByKey :: gb_trees:tree() +) -> gb_trees:tree(). inherit_flags(_Current, [], Acc) -> @@ -234,9 +252,10 @@ inherit_flags(Current, [Item | Items], Acc) -> end. -spec match_prefix( - AKey :: pattern(), - BKey :: pattern(), - ByKey :: gb_trees:tree()) -> + AKey :: pattern(), + BKey :: pattern(), + ByKey :: gb_trees:tree() +) -> boolean(). match_prefix(AKey, BKey, Acc) -> @@ -257,9 +276,10 @@ match_prefix({{Key0, _, _, _}, _, _}, {{Key1, _, true, S1}, _, _}) -> end. -spec update_flags( - AKey :: pattern(), - BKey :: pattern(), - ByKey :: gb_trees:tree()) -> + AKey :: pattern(), + BKey :: pattern(), + ByKey :: gb_trees:tree() +) -> gb_trees:tree(). update_flags(AKey, BKey, Acc) -> @@ -283,6 +303,7 @@ update_flags({Pattern, E0, D0}, {_, E1, D1}) -> get_config_section(Section) -> try config:get(Section) - catch error:badarg -> + catch + error:badarg -> [] end. diff --git a/src/couch/src/couch_hotp.erl b/src/couch/src/couch_hotp.erl index 4ba81c9bf..cdb8291f3 100644 --- a/src/couch/src/couch_hotp.erl +++ b/src/couch/src/couch_hotp.erl @@ -14,15 +14,16 @@ -export([generate/4]). -generate(Alg, Key, Counter, OutputLen) - when is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen) -> +generate(Alg, Key, Counter, OutputLen) when + is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen) +-> Hmac = couch_util:hmac(Alg, Key, <<Counter:64>>), Offset = binary:last(Hmac) band 16#f, Code = ((binary:at(Hmac, Offset) band 16#7f) bsl 24) + - ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) + - ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) + - ((binary:at(Hmac, Offset + 3) band 16#ff)), + ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) + + ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) + + (binary:at(Hmac, Offset + 3) band 16#ff), case OutputLen of 6 -> Code rem 1000000; 7 -> Code rem 10000000; diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl index 535fc9245..64b68ce3f 100644 --- a/src/couch/src/couch_httpd.erl +++ b/src/couch/src/couch_httpd.erl @@ -18,21 +18,27 @@ -export([start_link/0, start_link/1, stop/0, handle_request/5]). 
--export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]). --export([path/1,absolute_uri/2,body_length/1]). --export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]). +-export([header_value/2, header_value/3, qs_value/2, qs_value/3, qs/1, qs_json_value/3]). +-export([path/1, absolute_uri/2, body_length/1]). +-export([verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4, error_info/1]). -export([make_fun_spec_strs/1]). -export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]). --export([parse_form/1,json_body/1,json_body_obj/1,body/1]). +-export([parse_form/1, json_body/1, json_body_obj/1, body/1]). -export([doc_etag/1, doc_etag/3, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]). --export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]). --export([start_chunked_response/3,send_chunk/2,log_request/2]). +-export([primary_header_value/2, partition/1, serve_file/3, serve_file/4, server_header/0]). +-export([start_chunked_response/3, send_chunk/2, log_request/2]). -export([start_response_length/4, start_response/3, send/2]). -export([start_json_response/2, start_json_response/3, end_json_response/1]). --export([send_response/4,send_response_no_cors/4,send_method_not_allowed/2, - send_error/2,send_error/4, send_redirect/2,send_chunked_error/2]). --export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]). --export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]). +-export([ + send_response/4, + send_response_no_cors/4, + send_method_not_allowed/2, + send_error/2, send_error/4, + send_redirect/2, + send_chunked_error/2 +]). +-export([send_json/2, send_json/3, send_json/4, last_chunk/1, parse_multipart_request/3]). +-export([accepted_encodings/1, handle_request_int/5, validate_referer/1, validate_ctype/2]). -export([http_1_0_keep_alive/2]). -export([validate_host/1]). -export([validate_bind_address/1]). @@ -47,7 +53,8 @@ -define(DEFAULT_SOCKET_OPTIONS, "[{sndbuf, 262144}]"). -define(DEFAULT_AUTHENTICATION_HANDLERS, "{couch_httpd_auth, cookie_authentication_handler}, " - "{couch_httpd_auth, default_authentication_handler}"). + "{couch_httpd_auth, default_authentication_handler}" +). start_link() -> start_link(http). 
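Stepping back to the couch_hotp.erl hunk above: generate/4 is the dynamic-truncation algorithm of RFC 4226, so it can be sanity-checked against the RFC's published test vectors, e.g.:

```
%% RFC 4226 Appendix D vector: ASCII key "12345678901234567890",
%% counter 0, six digits:
1> couch_hotp:generate(sha, <<"12345678901234567890">>, 0, 6).
755224
```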
@@ -58,18 +65,24 @@ start_link(https) -> Port = config:get("ssl", "port", "6984"), {ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", undefined)), {ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", undefined)), - {ok, SecureRenegotiate} = couch_util:parse_term(config:get("ssl", "secure_renegotiate", undefined)), + {ok, SecureRenegotiate} = couch_util:parse_term( + config:get("ssl", "secure_renegotiate", undefined) + ), ServerOpts0 = - [{cacertfile, config:get("ssl", "cacert_file", undefined)}, - {keyfile, config:get("ssl", "key_file", undefined)}, - {certfile, config:get("ssl", "cert_file", undefined)}, - {password, config:get("ssl", "password", undefined)}, - {secure_renegotiate, SecureRenegotiate}, - {versions, Versions}, - {ciphers, Ciphers}], - - case (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse - couch_util:get_value(certfile, ServerOpts0) == undefined) of + [ + {cacertfile, config:get("ssl", "cacert_file", undefined)}, + {keyfile, config:get("ssl", "key_file", undefined)}, + {certfile, config:get("ssl", "cert_file", undefined)}, + {password, config:get("ssl", "password", undefined)}, + {secure_renegotiate, SecureRenegotiate}, + {versions, Versions}, + {ciphers, Ciphers} + ], + + case + (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse + couch_util:get_value(certfile, ServerOpts0) == undefined) + of true -> couch_log:error("SSL enabled but PEM certificates are missing", []), throw({error, missing_certs}); @@ -77,44 +90,58 @@ start_link(https) -> ok end, - ServerOpts = [Opt || {_, V}=Opt <- ServerOpts0, V /= undefined], - - ClientOpts = case config:get("ssl", "verify_ssl_certificates", "false") of - "false" -> - []; - "true" -> - FailIfNoPeerCert = case config:get("ssl", "fail_if_no_peer_cert", "false") of - "false" -> false; - "true" -> true - end, - [{depth, list_to_integer(config:get("ssl", - "ssl_certificate_max_depth", "1"))}, - {fail_if_no_peer_cert, FailIfNoPeerCert}, - {verify, verify_peer}] ++ - case config:get("ssl", "verify_fun", undefined) of - undefined -> []; - SpecStr -> - [{verify_fun, make_arity_3_fun(SpecStr)}] - end - end, + ServerOpts = [Opt || {_, V} = Opt <- ServerOpts0, V /= undefined], + + ClientOpts = + case config:get("ssl", "verify_ssl_certificates", "false") of + "false" -> + []; + "true" -> + FailIfNoPeerCert = + case config:get("ssl", "fail_if_no_peer_cert", "false") of + "false" -> false; + "true" -> true + end, + [ + {depth, + list_to_integer( + config:get( + "ssl", + "ssl_certificate_max_depth", + "1" + ) + )}, + {fail_if_no_peer_cert, FailIfNoPeerCert}, + {verify, verify_peer} + ] ++ + case config:get("ssl", "verify_fun", undefined) of + undefined -> []; + SpecStr -> [{verify_fun, make_arity_3_fun(SpecStr)}] + end + end, SslOpts = ServerOpts ++ ClientOpts, Options = - [{port, Port}, - {ssl, true}, - {ssl_opts, SslOpts}], + [ + {port, Port}, + {ssl, true}, + {ssl_opts, SslOpts} + ], start_link(https, Options). 
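The ssl server options assembled above come straight from the [ssl] config section; a minimal HTTPS setup touching only keys this code reads might look like the following (paths and values hypothetical):

```
[ssl]
port = 6984
cert_file = /etc/couchdb/cert/couchdb.pem
key_file = /etc/couchdb/cert/privkey.pem
; optional, parsed with couch_util:parse_term/1
;tls_versions = ['tlsv1.2', 'tlsv1.3']
;secure_renegotiate = true
```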
start_link(Name, Options) -> - BindAddress = case config:get("httpd", "bind_address", "any") of - "any" -> any; - Else -> Else - end, + BindAddress = + case config:get("httpd", "bind_address", "any") of + "any" -> any; + Else -> Else + end, ok = validate_bind_address(BindAddress), {ok, ServerOptions} = couch_util:parse_term( - config:get("httpd", "server_options", "[]")), + config:get("httpd", "server_options", "[]") + ), {ok, SocketOptions} = couch_util:parse_term( - config:get("httpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS)), + config:get("httpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS) + ), set_auth_handlers(), Handlers = get_httpd_handlers(), @@ -123,21 +150,26 @@ start_link(Name, Options) -> % get the same value. couch_server:get_uuid(), - Loop = fun(Req)-> + Loop = fun(Req) -> case SocketOptions of - [] -> - ok; - _ -> - ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions) + [] -> + ok; + _ -> + ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions) end, apply(?MODULE, handle_request, [Req | Handlers]) end, % set mochiweb options - FinalOptions = lists:append([Options, ServerOptions, [ + FinalOptions = lists:append([ + Options, + ServerOptions, + [ {loop, Loop}, {name, Name}, - {ip, BindAddress}]]), + {ip, BindAddress} + ] + ]), % launch mochiweb case mochiweb_http:start(FinalOptions) of @@ -148,21 +180,27 @@ start_link(Name, Options) -> throw({error, Reason}) end. - stop() -> mochiweb_http:stop(couch_httpd), catch mochiweb_http:stop(https). - set_auth_handlers() -> AuthenticationSrcs = make_fun_spec_strs( - config:get("httpd", "authentication_handlers", - ?DEFAULT_AUTHENTICATION_HANDLERS)), + config:get( + "httpd", + "authentication_handlers", + ?DEFAULT_AUTHENTICATION_HANDLERS + ) + ), AuthHandlers = lists:map( - fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs), - AuthenticationFuns = AuthHandlers ++ [ - fun couch_httpd_auth:party_mode_handler/1 %% must be last - ], + fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs + ), + AuthenticationFuns = + AuthHandlers ++ + [ + %% must be last + fun couch_httpd_auth:party_mode_handler/1 + ], ok = application:set_env(couch, auth_handlers, AuthenticationFuns). auth_handler_name(SpecStr) -> @@ -174,21 +212,27 @@ get_httpd_handlers() -> UrlHandlersList = lists:map( fun({UrlKey, SpecStr}) -> {?l2b(UrlKey), make_arity_1_fun(SpecStr)} - end, HttpdGlobalHandlers), + end, + HttpdGlobalHandlers + ), {ok, HttpdDbHandlers} = application:get_env(couch, httpd_db_handlers), DbUrlHandlersList = lists:map( fun({UrlKey, SpecStr}) -> {?l2b(UrlKey), make_arity_2_fun(SpecStr)} - end, HttpdDbHandlers), + end, + HttpdDbHandlers + ), {ok, HttpdDesignHandlers} = application:get_env(couch, httpd_design_handlers), DesignUrlHandlersList = lists:map( fun({UrlKey, SpecStr}) -> {?l2b(UrlKey), make_arity_3_fun(SpecStr)} - end, HttpdDesignHandlers), + end, + HttpdDesignHandlers + ), UrlHandlers = dict:from_list(UrlHandlersList), DbUrlHandlers = dict:from_list(DbUrlHandlersList), @@ -200,26 +244,26 @@ get_httpd_handlers() -> % or "{my_module, my_fun, <<"my_arg">>}" make_arity_1_fun(SpecStr) -> case couch_util:parse_term(SpecStr) of - {ok, {Mod, Fun, SpecArg}} -> - fun(Arg) -> Mod:Fun(Arg, SpecArg) end; - {ok, {Mod, Fun}} -> - fun(Arg) -> Mod:Fun(Arg) end + {ok, {Mod, Fun, SpecArg}} -> + fun(Arg) -> Mod:Fun(Arg, SpecArg) end; + {ok, {Mod, Fun}} -> + fun(Arg) -> Mod:Fun(Arg) end end. 
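make_arity_1_fun/1 above (and the arity-2/3 variants just below) turn such config spec strings into closures; when the tuple carries a third element, it is appended as a trailing argument. Schematically, with placeholder names my_mod/my_fun:

```
%% config value: {my_mod, my_fun}
%%   make_arity_1_fun(...) => fun(Arg) -> my_mod:my_fun(Arg) end
%% config value: {my_mod, my_fun, <<"extra">>}
%%   make_arity_1_fun(...) => fun(Arg) -> my_mod:my_fun(Arg, <<"extra">>) end
```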
make_arity_2_fun(SpecStr) -> case couch_util:parse_term(SpecStr) of - {ok, {Mod, Fun, SpecArg}} -> - fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end; - {ok, {Mod, Fun}} -> - fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end + {ok, {Mod, Fun, SpecArg}} -> + fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end; + {ok, {Mod, Fun}} -> + fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end end. make_arity_3_fun(SpecStr) -> case couch_util:parse_term(SpecStr) of - {ok, {Mod, Fun, SpecArg}} -> - fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end; - {ok, {Mod, Fun}} -> - fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end + {ok, {Mod, Fun, SpecArg}} -> + fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end; + {ok, {Mod, Fun}} -> + fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end end. % SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}" @@ -231,15 +275,25 @@ handle_request(MochiReq) -> erlang:put(mochiweb_request_body, Body), apply(?MODULE, handle_request, [MochiReq | get_httpd_handlers()]). -handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers, - DesignUrlHandlers) -> +handle_request( + MochiReq, + DefaultFun, + UrlHandlers, + DbUrlHandlers, + DesignUrlHandlers +) -> %% reset rewrite count for new request erlang:put(?REWRITE_COUNT, 0), MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq), - handle_request_int(MochiReq1, DefaultFun, - UrlHandlers, DbUrlHandlers, DesignUrlHandlers). + handle_request_int( + MochiReq1, + DefaultFun, + UrlHandlers, + DbUrlHandlers, + DesignUrlHandlers + ). handle_request_int(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers) -> @@ -420,8 +474,10 @@ valid_hosts() -> re:split(List, ",", [{return, list}]). check_request_uri_length(Uri) -> - check_request_uri_length(Uri, - chttpd_util:get_chttpd_config("max_uri_length")). + check_request_uri_length( + Uri, + chttpd_util:get_chttpd_config("max_uri_length") + ). check_request_uri_length(_Uri, undefined) -> ok; @@ -444,34 +500,33 @@ validate_referer(Req) -> Host = host_for_request(Req), Referer = header_value(Req, "Referer", fail), case Referer of - fail -> - throw({bad_request, <<"Referer header required.">>}); - Referer -> - {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer), - if - RefererHost =:= Host -> ok; - true -> throw({bad_request, <<"Referer header must match host.">>}) - end + fail -> + throw({bad_request, <<"Referer header required.">>}); + Referer -> + {_, RefererHost, _, _, _} = mochiweb_util:urlsplit(Referer), + if + RefererHost =:= Host -> ok; + true -> throw({bad_request, <<"Referer header must match host.">>}) + end end. validate_ctype(Req, Ctype) -> case header_value(Req, "Content-Type") of - undefined -> - throw({bad_ctype, "Content-Type must be "++Ctype}); - ReqCtype -> - case string:tokens(ReqCtype, ";") of - [Ctype] -> ok; - [Ctype | _Rest] -> ok; - _Else -> - throw({bad_ctype, "Content-Type must be "++Ctype}) - end + undefined -> + throw({bad_ctype, "Content-Type must be " ++ Ctype}); + ReqCtype -> + case string:tokens(ReqCtype, ";") of + [Ctype] -> ok; + [Ctype | _Rest] -> ok; + _Else -> throw({bad_ctype, "Content-Type must be " ++ Ctype}) + end end. - check_max_request_length(Req) -> Len = list_to_integer(header_value(Req, "Content-Length", "0")), MaxLen = chttpd_util:get_chttpd_config_integer( - "max_http_request_size", 4294967296), + "max_http_request_size", 4294967296 + ), case Len > MaxLen of true -> exit({body_too_large, Len}); @@ -479,32 +534,31 @@ check_max_request_length(Req) -> ok end. 
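A quick reference for the validate_ctype/2 hunk above: the received value is tokenized on ";", so a media type may carry parameters (header values below are illustrative):

```
%% validate_ctype(Req, "application/json") accepts
%%   Content-Type: application/json
%%   Content-Type: application/json; charset=utf-8
%% and throws {bad_ctype, ...} for anything else.
```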
- % Utilities partition(Path) -> mochiweb_util:partition(Path, "/"). -header_value(#httpd{mochi_req=MochiReq}, Key) -> +header_value(#httpd{mochi_req = MochiReq}, Key) -> MochiReq:get_header_value(Key). -header_value(#httpd{mochi_req=MochiReq}, Key, Default) -> +header_value(#httpd{mochi_req = MochiReq}, Key, Default) -> case MochiReq:get_header_value(Key) of - undefined -> Default; - Value -> Value + undefined -> Default; + Value -> Value end. -primary_header_value(#httpd{mochi_req=MochiReq}, Key) -> +primary_header_value(#httpd{mochi_req = MochiReq}, Key) -> MochiReq:get_primary_header_value(Key). -accepted_encodings(#httpd{mochi_req=MochiReq}) -> +accepted_encodings(#httpd{mochi_req = MochiReq}) -> case MochiReq:accepted_encodings(["gzip", "identity"]) of - bad_accept_encoding_value -> - throw(bad_accept_encoding_value); - [] -> - throw(unacceptable_encoding); - EncList -> - EncList + bad_accept_encoding_value -> + throw(bad_accept_encoding_value); + [] -> + throw(unacceptable_encoding); + EncList -> + EncList end. serve_file(Req, RelativePath, DocumentRoot) -> @@ -514,7 +568,8 @@ serve_file(Req0, RelativePath0, DocumentRoot0, ExtraHeaders) -> Headers0 = basic_headers(Req0, ExtraHeaders), {ok, {Req1, Code1, Headers1, RelativePath1, DocumentRoot1}} = chttpd_plugin:before_serve_file( - Req0, 200, Headers0, RelativePath0, DocumentRoot0), + Req0, 200, Headers0, RelativePath0, DocumentRoot0 + ), log_request(Req1, Code1), #httpd{mochi_req = MochiReq} = Req1, {ok, MochiReq:serve_file(RelativePath1, DocumentRoot1, Headers1)}. @@ -527,53 +582,61 @@ qs_value(Req, Key, Default) -> qs_json_value(Req, Key, Default) -> case qs_value(Req, Key, Default) of - Default -> - Default; - Result -> - ?JSON_DECODE(Result) + Default -> + Default; + Result -> + ?JSON_DECODE(Result) end. -qs(#httpd{mochi_req=MochiReq}) -> +qs(#httpd{mochi_req = MochiReq}) -> MochiReq:parse_qs(). -path(#httpd{mochi_req=MochiReq}) -> +path(#httpd{mochi_req = MochiReq}) -> MochiReq:get(path). -host_for_request(#httpd{mochi_req=MochiReq}) -> +host_for_request(#httpd{mochi_req = MochiReq}) -> XHost = chttpd_util:get_chttpd_config( - "x_forwarded_host", "X-Forwarded-Host"), + "x_forwarded_host", "X-Forwarded-Host" + ), case MochiReq:get_header_value(XHost) of undefined -> case MochiReq:get_header_value("Host") of undefined -> - {ok, {Address, Port}} = case MochiReq:get(socket) of - {ssl, SslSocket} -> ssl:sockname(SslSocket); - Socket -> inet:sockname(Socket) - end, + {ok, {Address, Port}} = + case MochiReq:get(socket) of + {ssl, SslSocket} -> ssl:sockname(SslSocket); + Socket -> inet:sockname(Socket) + end, inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port); Value1 -> Value1 end; - Value -> Value + Value -> + Value end. 
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, [$/ | _] = Path) -> +absolute_uri(#httpd{mochi_req = MochiReq} = Req, [$/ | _] = Path) -> Host = host_for_request(Req), XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"), - Scheme = case MochiReq:get_header_value(XSsl) of - "on" -> "https"; - _ -> - XProto = chttpd_util:get_chttpd_config( - "x_forwarded_proto", "X-Forwarded-Proto"), - case MochiReq:get_header_value(XProto) of - %% Restrict to "https" and "http" schemes only - "https" -> "https"; - _ -> case MochiReq:get(scheme) of - https -> "https"; - http -> "http" - end - end - end, + Scheme = + case MochiReq:get_header_value(XSsl) of + "on" -> + "https"; + _ -> + XProto = chttpd_util:get_chttpd_config( + "x_forwarded_proto", "X-Forwarded-Proto" + ), + case MochiReq:get_header_value(XProto) of + %% Restrict to "https" and "http" schemes only + "https" -> + "https"; + _ -> + case MochiReq:get(scheme) of + https -> "https"; + http -> "http" + end + end + end, Scheme ++ "://" ++ Host ++ Path; absolute_uri(_Req, _Path) -> throw({bad_request, "path must begin with a /."}). @@ -584,60 +647,63 @@ unquote(UrlEncodedString) -> quote(UrlDecodedString) -> mochiweb_util:quote_plus(UrlDecodedString). -parse_form(#httpd{mochi_req=MochiReq}) -> +parse_form(#httpd{mochi_req = MochiReq}) -> mochiweb_multipart:parse_form(MochiReq). -recv(#httpd{mochi_req=MochiReq}, Len) -> +recv(#httpd{mochi_req = MochiReq}, Len) -> MochiReq:recv(Len). -recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) -> +recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) -> % Fun is called once with each chunk % Fun({Length, Binary}, State) % called with Length == 0 on the last time. - MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState, + MochiReq:stream_body( + MaxChunkSize, + ChunkFun, + InitState, chttpd_util:get_chttpd_config_integer( - "max_http_request_size", 4294967296)). + "max_http_request_size", 4294967296 + ) + ). -body_length(#httpd{mochi_req=MochiReq}) -> +body_length(#httpd{mochi_req = MochiReq}) -> MochiReq:get(body_length). -body(#httpd{mochi_req=MochiReq, req_body=undefined}) -> +body(#httpd{mochi_req = MochiReq, req_body = undefined}) -> MaxSize = chttpd_util:get_chttpd_config_integer( - "max_http_request_size", 4294967296), + "max_http_request_size", 4294967296 + ), MochiReq:recv_body(MaxSize); -body(#httpd{req_body=ReqBody}) -> +body(#httpd{req_body = ReqBody}) -> ReqBody. -json_body(#httpd{req_body=undefined} = Httpd) -> +json_body(#httpd{req_body = undefined} = Httpd) -> case body(Httpd) of undefined -> throw({bad_request, "Missing request body"}); Body -> ?JSON_DECODE(maybe_decompress(Httpd, Body)) end; - -json_body(#httpd{req_body=ReqBody}) -> +json_body(#httpd{req_body = ReqBody}) -> ReqBody. json_body_obj(Httpd) -> case json_body(Httpd) of {Props} -> {Props}; - _Else -> - throw({bad_request, "Request body must be a JSON object"}) + _Else -> throw({bad_request, "Request body must be a JSON object"}) end. - maybe_decompress(Httpd, Body) -> case header_value(Httpd, "Content-Encoding", "identity") of - "gzip" -> - zlib:gunzip(Body); - "identity" -> - Body; - Else -> - throw({bad_ctype, [Else, " is not a supported content encoding."]}) + "gzip" -> + zlib:gunzip(Body); + "identity" -> + Body; + Else -> + throw({bad_ctype, [Else, " is not a supported content encoding."]}) end. 
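Because of maybe_decompress/2 above, a client may gzip its request body as long as it labels it with Content-Encoding: gzip; the server side is a plain zlib round trip, e.g. in the shell:

```
1> Gz = zlib:gzip(<<"{\"ok\":true}">>).
2> zlib:gunzip(Gz).
<<"{\"ok\":true}">>
```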
-doc_etag(#doc{id=Id, body=Body, revs={Start, [DiskRev|_]}}) -> +doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) -> doc_etag(Id, Body, {Start, DiskRev}). doc_etag(<<"_local/", _/binary>>, Body, {Start, DiskRev}) -> @@ -647,7 +713,7 @@ doc_etag(_Id, _Body, {Start, DiskRev}) -> rev_etag({Start, DiskRev}) -> Rev = couch_doc:rev_to_str({Start, DiskRev}), - <<$", Rev/binary, $">>. + <<$", Rev/binary, $">>. make_etag(Term) -> <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)), @@ -655,20 +721,20 @@ make_etag(Term) -> etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) -> etag_match(Req, binary_to_list(CurrentEtag)); - etag_match(Req, CurrentEtag) -> EtagsToMatch = string:tokens( - header_value(Req, "If-None-Match", ""), ", "), + header_value(Req, "If-None-Match", ""), ", " + ), lists:member(CurrentEtag, EtagsToMatch). etag_respond(Req, CurrentEtag, RespFun) -> case etag_match(Req, CurrentEtag) of - true -> - % the client has this in their cache. - send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>); - false -> - % Run the function. - RespFun() + true -> + % the client has this in their cache. + send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>); + false -> + % Run the function. + RespFun() end. etag_maybe(Req, RespFun) -> @@ -679,15 +745,15 @@ etag_maybe(Req, RespFun) -> send_response(Req, 304, [{"ETag", ETag}], <<>>) end. -verify_is_server_admin(#httpd{user_ctx=UserCtx}) -> +verify_is_server_admin(#httpd{user_ctx = UserCtx}) -> verify_is_server_admin(UserCtx); -verify_is_server_admin(#user_ctx{roles=Roles}) -> +verify_is_server_admin(#user_ctx{roles = Roles}) -> case lists:member(<<"_admin">>, Roles) of - true -> ok; - false -> throw({unauthorized, <<"You are not a server admin.">>}) + true -> ok; + false -> throw({unauthorized, <<"You are not a server admin.">>}) end. -log_request(#httpd{mochi_req=MochiReq,peer=Peer}=Req, Code) -> +log_request(#httpd{mochi_req = MochiReq, peer = Peer} = Req, Code) -> case erlang:get(dont_log_request) of true -> ok; @@ -714,16 +780,16 @@ log_response(Code, Body) -> couch_log:error("httpd ~p error response:~n ~s", [Code, Body]) end. -start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers0, Length) -> +start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) -> Headers1 = basic_headers(Req, Headers0), Resp = handle_response(Req, Code, Headers1, Length, start_response_length), case MochiReq:get(method) of - 'HEAD' -> throw({http_head_abort, Resp}); - _ -> ok + 'HEAD' -> throw({http_head_abort, Resp}); + _ -> ok end, {ok, Resp}. -start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) -> +start_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) -> Headers1 = basic_headers(Req, Headers0), Resp = handle_response(Req, Code, Headers1, undefined, start_response), case MochiReq:get(method) of @@ -741,9 +807,9 @@ send(Resp, Data) -> no_resp_conn_header([]) -> true; -no_resp_conn_header([{Hdr, V}|Rest]) when is_binary(Hdr)-> - no_resp_conn_header([{?b2l(Hdr), V}|Rest]); -no_resp_conn_header([{Hdr, _}|Rest]) when is_list(Hdr)-> +no_resp_conn_header([{Hdr, V} | Rest]) when is_binary(Hdr) -> + no_resp_conn_header([{?b2l(Hdr), V} | Rest]); +no_resp_conn_header([{Hdr, _} | Rest]) when is_list(Hdr) -> case string:to_lower(Hdr) of "connection" -> false; _ -> no_resp_conn_header(Rest) @@ -760,12 +826,12 @@ http_1_0_keep_alive(Req, Headers) -> false -> Headers end. 
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) -> +start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) -> Headers1 = add_headers(Req, Headers0), Resp = handle_response(Req, Code, Headers1, chunked, respond), case MochiReq:get(method) of - 'HEAD' -> throw({http_head_abort, Resp}); - _ -> ok + 'HEAD' -> throw({http_head_abort, Resp}); + _ -> ok end, {ok, Resp}. @@ -774,8 +840,9 @@ send_chunk({remote, Pid, Ref} = Resp, Data) -> {ok, Resp}; send_chunk(Resp, Data) -> case iolist_size(Data) of - 0 -> ok; % do nothing - _ -> Resp:write_chunk(Data) + % do nothing + 0 -> ok; + _ -> Resp:write_chunk(Data) end, {ok, Resp}. @@ -790,17 +857,23 @@ send_response(Req, Code, Headers0, Body) -> Headers1 = chttpd_cors:headers(Req, Headers0), send_response_no_cors(Req, Code, Headers1, Body). -send_response_no_cors(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) -> +send_response_no_cors(#httpd{mochi_req = MochiReq} = Req, Code, Headers, Body) -> Headers1 = http_1_0_keep_alive(MochiReq, Headers), Headers2 = basic_headers_no_cors(Req, Headers1), Headers3 = chttpd_xframe_options:header(Req, Headers2), - Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3), + Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3), Resp = handle_response(Req, Code, Headers4, Body, respond), log_response(Code, Body), {ok, Resp}. send_method_not_allowed(Req, Methods) -> - send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")). + send_error( + Req, + 405, + [{"Allow", Methods}], + <<"method_not_allowed">>, + ?l2b("Only " ++ Methods ++ " allowed") + ). send_json(Req, Value) -> send_json(Req, 200, Value). @@ -843,13 +916,18 @@ initialize_jsonp(Req) -> _ -> ok end, case get(jsonp) of - no_jsonp -> []; - [] -> []; + no_jsonp -> + []; + [] -> + []; CallBack -> try % make sure jsonp is configured on (default off) - case chttpd_util:get_chttpd_config_boolean( - "allow_jsonp", false) of + case + chttpd_util:get_chttpd_config_boolean( + "allow_jsonp", false + ) + of true -> validate_callback(CallBack); false -> @@ -889,12 +967,10 @@ validate_callback([Char | Rest]) -> _ when Char == $_ -> ok; _ when Char == $[ -> ok; _ when Char == $] -> ok; - _ -> - throw({bad_request, invalid_callback}) + _ -> throw({bad_request, invalid_callback}) end, validate_callback(Rest). - error_info({Error, Reason}) when is_list(Reason) -> error_info({Error, ?l2b(Reason)}); error_info(bad_request) -> @@ -923,8 +999,10 @@ error_info({forbidden, Msg}) -> error_info({unauthorized, Msg}) -> {401, <<"unauthorized">>, Msg}; error_info(file_exists) -> - {412, <<"file_exists">>, <<"The database could not be " - "created, the file already exists.">>}; + {412, <<"file_exists">>, << + "The database could not be " + "created, the file already exists." + >>}; error_info(request_entity_too_large) -> {413, <<"too_large">>, <<"the request entity is too large">>}; error_info({request_entity_too_large, {attachment, AttName}}) -> @@ -938,9 +1016,10 @@ error_info({bad_ctype, Reason}) -> error_info(requested_range_not_satisfiable) -> {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>}; error_info({error, {illegal_database_name, Name}}) -> - Message = <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ", - "digits (0-9), and any of the characters _, $, (, ), +, -, and / ", - "are allowed. Must begin with a letter.">>, + Message = + <<"Name: '", Name/binary, "'. 
Only lowercase characters (a-z), ", + "digits (0-9), and any of the characters _, $, (, ), +, -, and / ", + "are allowed. Must begin with a letter.">>, {400, <<"illegal_database_name">>, Message}; error_info({missing_stub, Reason}) -> {412, <<"missing_stub">>, Reason}; @@ -951,64 +1030,102 @@ error_info({Error, Reason}) -> error_info(Error) -> {500, <<"unknown_error">>, couch_util:to_binary(Error)}. -error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) -> - if Code == 401 -> - % this is where the basic auth popup is triggered - case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of - undefined -> - case chttpd_util:get_chttpd_config("WWW-Authenticate") of - undefined -> - % If the client is a browser and the basic auth popup isn't turned on - % redirect to the session page. - case ErrorStr of - <<"unauthorized">> -> - case chttpd_util:get_chttpd_auth_config( - "authentication_redirect", "/_utils/session.html") of - undefined -> {Code, []}; - AuthRedirect -> - case chttpd_util:get_chttpd_auth_config_boolean( - "require_valid_user", false) of - true -> - % send the browser popup header no matter what if we are require_valid_user - {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]}; - false -> - case MochiReq:accepts_content_type("application/json") of - true -> - {Code, []}; - false -> - case MochiReq:accepts_content_type("text/html") of - true -> - % Redirect to the path the user requested, not - % the one that is used internally. - UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of - undefined -> - MochiReq:get(path); - VHostPath -> - VHostPath - end, - RedirectLocation = lists:flatten([ - AuthRedirect, - "?return=", couch_util:url_encode(UrlReturnRaw), - "&reason=", couch_util:url_encode(ReasonStr) - ]), - {302, [{"Location", absolute_uri(Req, RedirectLocation)}]}; - false -> +error_headers(#httpd{mochi_req = MochiReq} = Req, Code, ErrorStr, ReasonStr) -> + if + Code == 401 -> + % this is where the basic auth popup is triggered + case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of + undefined -> + case chttpd_util:get_chttpd_config("WWW-Authenticate") of + undefined -> + % If the client is a browser and the basic auth popup isn't turned on + % redirect to the session page. + case ErrorStr of + <<"unauthorized">> -> + case + chttpd_util:get_chttpd_auth_config( + "authentication_redirect", "/_utils/session.html" + ) + of + undefined -> + {Code, []}; + AuthRedirect -> + case + chttpd_util:get_chttpd_auth_config_boolean( + "require_valid_user", false + ) + of + true -> + % send the browser popup header no matter what if we are require_valid_user + {Code, [ + {"WWW-Authenticate", + "Basic realm=\"server\""} + ]}; + false -> + case + MochiReq:accepts_content_type( + "application/json" + ) + of + true -> + {Code, []}; + false -> + case + MochiReq:accepts_content_type( + "text/html" + ) + of + true -> + % Redirect to the path the user requested, not + % the one that is used internally. 
+ UrlReturnRaw = + case + MochiReq:get_header_value( + "x-couchdb-vhost-path" + ) + of + undefined -> + MochiReq:get(path); + VHostPath -> + VHostPath + end, + RedirectLocation = lists:flatten( + [ + AuthRedirect, + "?return=", + couch_util:url_encode( + UrlReturnRaw + ), + "&reason=", + couch_util:url_encode( + ReasonStr + ) + ] + ), + {302, [ + {"Location", + absolute_uri( + Req, + RedirectLocation + )} + ]}; + false -> + {Code, []} + end + end + end + end; + _Else -> {Code, []} - end - end - end + end; + Type -> + {Code, [{"WWW-Authenticate", Type}]} end; - _Else -> - {Code, []} - end; - Type -> - {Code, [{"WWW-Authenticate", Type}]} + Type -> + {Code, [{"WWW-Authenticate", Type}]} end; - Type -> - {Code, [{"WWW-Authenticate", Type}]} - end; - true -> - {Code, []} + true -> + {Code, []} end. send_error(Req, Error) -> @@ -1020,25 +1137,33 @@ send_error(Req, Code, ErrorStr, ReasonStr) -> send_error(Req, Code, [], ErrorStr, ReasonStr). send_error(Req, Code, Headers, ErrorStr, ReasonStr) -> - send_json(Req, Code, Headers, - {[{<<"error">>, ErrorStr}, - {<<"reason">>, ReasonStr}]}). + send_json( + Req, + Code, + Headers, + {[ + {<<"error">>, ErrorStr}, + {<<"reason">>, ReasonStr} + ]} + ). % give the option for list functions to output html or other raw errors send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) -> send_chunk(Resp, Reason), last_chunk(Resp); - send_chunked_error(Resp, Error) -> {Code, ErrorStr, ReasonStr} = error_info(Error), - JsonError = {[{<<"code">>, Code}, - {<<"error">>, ErrorStr}, - {<<"reason">>, ReasonStr}]}, - send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])), + JsonError = + {[ + {<<"code">>, Code}, + {<<"error">>, ErrorStr}, + {<<"reason">>, ReasonStr} + ]}, + send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])), last_chunk(Resp). send_redirect(Req, Path) -> - send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>). + send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>). negotiate_content_type(_Req) -> case get(jsonp) of @@ -1048,27 +1173,33 @@ negotiate_content_type(_Req) -> end. server_header() -> - [{"Server", "CouchDB/" ++ couch_server:get_version() ++ - " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}]. - + [ + {"Server", + "CouchDB/" ++ couch_server:get_version() ++ + " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"} + ]. -record(mp, {boundary, buffer, data_fun, callback}). - parse_multipart_request(ContentType, DataFun, Callback) -> Boundary0 = iolist_to_binary(get_boundary(ContentType)), Boundary = <<"\r\n--", Boundary0/binary>>, - Mp = #mp{boundary= Boundary, - buffer= <<>>, - data_fun=DataFun, - callback=Callback}, - {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>, - fun nil_callback/1), - #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} = - parse_part_header(Mp2), + Mp = #mp{ + boundary = Boundary, + buffer = <<>>, + data_fun = DataFun, + callback = Callback + }, + {Mp2, _NilCallback} = read_until( + Mp, + <<"--", Boundary0/binary>>, + fun nil_callback/1 + ), + #mp{buffer = Buffer, data_fun = DataFun2, callback = Callback2} = + parse_part_header(Mp2), {Buffer, DataFun2, Callback2}. -nil_callback(_Data)-> +nil_callback(_Data) -> fun nil_callback/1. 
get_boundary({"multipart/" ++ _, Opts}) -> @@ -1077,83 +1208,102 @@ get_boundary({"multipart/" ++ _, Opts}) -> S end; get_boundary(ContentType) -> - {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType), + {"multipart/" ++ _, Opts} = mochiweb_util:parse_header(ContentType), get_boundary({"multipart/", Opts}). - - split_header(<<>>) -> []; split_header(Line) -> - {Name, Rest} = lists:splitwith(fun (C) -> C =/= $: end, - binary_to_list(Line)), - [$: | Value] = case Rest of - [] -> - throw({bad_request, <<"bad part header">>}); - Res -> - Res - end, - [{string:to_lower(string:strip(Name)), - mochiweb_util:parse_header(Value)}]. + {Name, Rest} = lists:splitwith( + fun(C) -> C =/= $: end, + binary_to_list(Line) + ), + [$: | Value] = + case Rest of + [] -> + throw({bad_request, <<"bad part header">>}); + Res -> + Res + end, + [{string:to_lower(string:strip(Name)), mochiweb_util:parse_header(Value)}]. -read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) -> +read_until(#mp{data_fun = DataFun, buffer = Buffer} = Mp, Pattern, Callback) -> case couch_util:find_in_binary(Pattern, Buffer) of - not_found -> - Callback2 = Callback(Buffer), - {Buffer2, DataFun2} = DataFun(), - Buffer3 = iolist_to_binary(Buffer2), - read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2); - {partial, 0} -> - {NewData, DataFun2} = DataFun(), - read_until(Mp#mp{data_fun=DataFun2, - buffer= iolist_to_binary([Buffer,NewData])}, - Pattern, Callback); - {partial, Skip} -> - <<DataChunk:Skip/binary, Rest/binary>> = Buffer, - Callback2 = Callback(DataChunk), - {NewData, DataFun2} = DataFun(), - read_until(Mp#mp{data_fun=DataFun2, - buffer= iolist_to_binary([Rest | NewData])}, - Pattern, Callback2); - {exact, 0} -> - PatternLen = size(Pattern), - <<_:PatternLen/binary, Rest/binary>> = Buffer, - {Mp#mp{buffer= Rest}, Callback}; - {exact, Skip} -> - PatternLen = size(Pattern), - <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer, - Callback2 = Callback(DataChunk), - {Mp#mp{buffer= Rest}, Callback2} + not_found -> + Callback2 = Callback(Buffer), + {Buffer2, DataFun2} = DataFun(), + Buffer3 = iolist_to_binary(Buffer2), + read_until(Mp#mp{data_fun = DataFun2, buffer = Buffer3}, Pattern, Callback2); + {partial, 0} -> + {NewData, DataFun2} = DataFun(), + read_until( + Mp#mp{ + data_fun = DataFun2, + buffer = iolist_to_binary([Buffer, NewData]) + }, + Pattern, + Callback + ); + {partial, Skip} -> + <<DataChunk:Skip/binary, Rest/binary>> = Buffer, + Callback2 = Callback(DataChunk), + {NewData, DataFun2} = DataFun(), + read_until( + Mp#mp{ + data_fun = DataFun2, + buffer = iolist_to_binary([Rest | NewData]) + }, + Pattern, + Callback2 + ); + {exact, 0} -> + PatternLen = size(Pattern), + <<_:PatternLen/binary, Rest/binary>> = Buffer, + {Mp#mp{buffer = Rest}, Callback}; + {exact, Skip} -> + PatternLen = size(Pattern), + <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer, + Callback2 = Callback(DataChunk), + {Mp#mp{buffer = Rest}, Callback2} end. 
- -parse_part_header(#mp{callback=UserCallBack}=Mp) -> - {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>, - fun(Next) -> acc_callback(Next, []) end), +parse_part_header(#mp{callback = UserCallBack} = Mp) -> + {Mp2, AccCallback} = read_until( + Mp, + <<"\r\n\r\n">>, + fun(Next) -> acc_callback(Next, []) end + ), HeaderData = AccCallback(get_data), Headers = - lists:foldl(fun(Line, Acc) -> - split_header(Line) ++ Acc - end, [], re:split(HeaderData,<<"\r\n">>, [])), + lists:foldl( + fun(Line, Acc) -> + split_header(Line) ++ Acc + end, + [], + re:split(HeaderData, <<"\r\n">>, []) + ), NextCallback = UserCallBack({headers, Headers}), - parse_part_body(Mp2#mp{callback=NextCallback}). - -parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) -> - {Mp2, WrappedCallback} = read_until(Mp, Prefix, - fun(Data) -> body_callback_wrapper(Data, Callback) end), + parse_part_body(Mp2#mp{callback = NextCallback}). + +parse_part_body(#mp{boundary = Prefix, callback = Callback} = Mp) -> + {Mp2, WrappedCallback} = read_until( + Mp, + Prefix, + fun(Data) -> body_callback_wrapper(Data, Callback) end + ), Callback2 = WrappedCallback(get_callback), Callback3 = Callback2(body_end), - case check_for_last(Mp2#mp{callback=Callback3}) of - {last, #mp{callback=Callback3}=Mp3} -> - Mp3#mp{callback=Callback3(eof)}; - {more, Mp3} -> - parse_part_header(Mp3) + case check_for_last(Mp2#mp{callback = Callback3}) of + {last, #mp{callback = Callback3} = Mp3} -> + Mp3#mp{callback = Callback3(eof)}; + {more, Mp3} -> + parse_part_header(Mp3) end. -acc_callback(get_data, Acc)-> +acc_callback(get_data, Acc) -> iolist_to_binary(lists:reverse(Acc)); -acc_callback(Data, Acc)-> +acc_callback(Data, Acc) -> fun(Next) -> acc_callback(Next, [Data | Acc]) end. body_callback_wrapper(get_callback, Callback) -> @@ -1162,18 +1312,23 @@ body_callback_wrapper(Data, Callback) -> Callback2 = Callback({body, Data}), fun(Next) -> body_callback_wrapper(Next, Callback2) end. - -check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) -> +check_for_last(#mp{buffer = Buffer, data_fun = DataFun} = Mp) -> case Buffer of - <<"--",_/binary>> -> {last, Mp}; - <<_, _, _/binary>> -> {more, Mp}; - _ -> % not long enough - {Data, DataFun2} = DataFun(), - check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>, - data_fun = DataFun2}) + <<"--", _/binary>> -> + {last, Mp}; + <<_, _, _/binary>> -> + {more, Mp}; + % not long enough + _ -> + {Data, DataFun2} = DataFun(), + check_for_last(Mp#mp{ + buffer = <<Buffer/binary, Data/binary>>, + data_fun = DataFun2 + }) end. -validate_bind_address(any) -> ok; +validate_bind_address(any) -> + ok; validate_bind_address(Address) -> case inet_parse:address(Address) of {ok, _} -> ok; @@ -1191,9 +1346,9 @@ basic_headers(Req, Headers0) -> chttpd_cors:headers(Req, Headers2). basic_headers_no_cors(Req, Headers) -> - Headers - ++ server_header() - ++ couch_httpd_auth:cookie_auth_header(Req, Headers). + Headers ++ + server_header() ++ + couch_httpd_auth:cookie_auth_header(Req, Headers). 
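To summarize the multipart parser that ends here: parse_multipart_request/3 drives the user-supplied callback through a small event protocol, and every invocation must return the callback to use next:

```
%% Callback({headers, Headers}) -> Callback2
%% Callback2({body, Chunk})     -> Callback3     (zero or more times)
%% Callback3(body_end)          -> Callback4     (one part finished)
%% ...repeated per part, then:
%% CallbackN(eof)               -> FinalResult   (no more parts)
```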
handle_response(Req0, Code0, Headers0, Args0, Type) -> {ok, {Req1, Code1, Headers1, Args1}} = before_response(Req0, Code0, Headers0, Args0), @@ -1259,27 +1414,40 @@ maybe_add_default_headers_test_() -> MustRevalidate = {"Cache-Control", "must-revalidate"}, ApplicationJavascript = {"Content-Type", "application/javascript"}, Cases = [ - {[], - [MustRevalidate, ApplicationJavascript], - "Should add Content-Type and Cache-Control to empty headers"}, - - {[NoCache], - [NoCache, ApplicationJavascript], - "Should add Content-Type only if Cache-Control is present"}, - - {[ApplicationJson], - [MustRevalidate, ApplicationJson], - "Should add Cache-Control if Content-Type is present"}, - - {[NoCache, ApplicationJson], - [NoCache, ApplicationJson], - "Should not add headers if Cache-Control and Content-Type are there"} + { + [], + [MustRevalidate, ApplicationJavascript], + "Should add Content-Type and Cache-Control to empty headers" + }, + + { + [NoCache], + [NoCache, ApplicationJavascript], + "Should add Content-Type only if Cache-Control is present" + }, + + { + [ApplicationJson], + [MustRevalidate, ApplicationJson], + "Should add Cache-Control if Content-Type is present" + }, + + { + [NoCache, ApplicationJson], + [NoCache, ApplicationJson], + "Should not add headers if Cache-Control and Content-Type are there" + } ], - Tests = lists:map(fun({InitialHeaders, ProperResult, Desc}) -> - {Desc, - ?_assertEqual(ProperResult, - maybe_add_default_headers(DummyRequest, InitialHeaders))} - end, Cases), + Tests = lists:map( + fun({InitialHeaders, ProperResult, Desc}) -> + {Desc, + ?_assertEqual( + ProperResult, + maybe_add_default_headers(DummyRequest, InitialHeaders) + )} + end, + Cases + ), {"Tests adding default headers", Tests}. log_request_test_() -> @@ -1299,27 +1467,24 @@ log_request_test_() -> [ fun() -> should_accept_code_and_message(true) end, fun() -> should_accept_code_and_message(false) end - ] - }. + ]}. should_accept_code_and_message(DontLogFlag) -> erlang:put(dont_log_response, DontLogFlag), - {"with dont_log_response = " ++ atom_to_list(DontLogFlag), - [ - {"Should accept code 200 and string message", - ?_assertEqual(ok, log_response(200, "OK"))}, - {"Should accept code 200 and JSON message", + {"with dont_log_response = " ++ atom_to_list(DontLogFlag), [ + {"Should accept code 200 and string message", ?_assertEqual(ok, log_response(200, "OK"))}, + {"Should accept code 200 and JSON message", ?_assertEqual(ok, log_response(200, {json, {[{ok, true}]}}))}, - {"Should accept code >= 400 and string error", + {"Should accept code >= 400 and string error", ?_assertEqual(ok, log_response(405, method_not_allowed))}, - {"Should accept code >= 400 and JSON error", - ?_assertEqual(ok, - log_response(405, {json, {[{error, method_not_allowed}]}}))}, - {"Should accept code >= 500 and string error", - ?_assertEqual(ok, log_response(500, undef))}, - {"Should accept code >= 500 and JSON error", + {"Should accept code >= 400 and JSON error", + ?_assertEqual( + ok, + log_response(405, {json, {[{error, method_not_allowed}]}}) + )}, + {"Should accept code >= 500 and string error", ?_assertEqual(ok, log_response(500, undef))}, + {"Should accept code >= 500 and JSON error", ?_assertEqual(ok, log_response(500, {json, {[{error, undef}]}}))} - ] - }. + ]}. -endif. diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl index 01a210d05..7bcb85fba 100644 --- a/src/couch/src/couch_httpd_auth.erl +++ b/src/couch/src/couch_httpd_auth.erl @@ -18,8 +18,10 @@ -export([party_mode_handler/1]).
--export([default_authentication_handler/1, default_authentication_handler/2, - special_test_authentication_handler/1]). +-export([ + default_authentication_handler/1, default_authentication_handler/2, + special_test_authentication_handler/1 +]). -export([cookie_authentication_handler/1, cookie_authentication_handler/2]). -export([null_authentication_handler/1]). -export([proxy_authentication_handler/1, proxy_authentification_handler/1]). @@ -33,59 +35,68 @@ -export([jwt_authentication_handler/1]). --import(couch_httpd, [header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2]). +-import(couch_httpd, [ + header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2 +]). --compile({no_auto_import,[integer_to_binary/1, integer_to_binary/2]}). +-compile({no_auto_import, [integer_to_binary/1, integer_to_binary/2]}). party_mode_handler(Req) -> - case chttpd_util:get_chttpd_auth_config_boolean( - "require_valid_user", false) of - true -> - throw({unauthorized, <<"Authentication required.">>}); - false -> - Req#httpd{user_ctx=#user_ctx{}} + case + chttpd_util:get_chttpd_auth_config_boolean( + "require_valid_user", false + ) + of + true -> + throw({unauthorized, <<"Authentication required.">>}); + false -> + Req#httpd{user_ctx = #user_ctx{}} end. special_test_authentication_handler(Req) -> case header_value(Req, "WWW-Authenticate") of - "X-Couch-Test-Auth " ++ NamePass -> - % NamePass is a colon separated string: "joe schmoe:a password". - [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]), - case {Name, Pass} of - {"Jan Lehnardt", "apple"} -> ok; - {"Christopher Lenz", "dog food"} -> ok; - {"Noah Slater", "biggiesmalls endian"} -> ok; - {"Chris Anderson", "mp3"} -> ok; - {"Damien Katz", "pecan pie"} -> ok; - {_, _} -> - throw({unauthorized, <<"Name or password is incorrect.">>}) - end, - Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}}; - _ -> - % No X-Couch-Test-Auth credentials sent, give admin access so the - % previous authentication can be restored after the test - Req#httpd{user_ctx=?ADMIN_USER} + "X-Couch-Test-Auth " ++ NamePass -> + % NamePass is a colon separated string: "joe schmoe:a password". + [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]), + case {Name, Pass} of + {"Jan Lehnardt", "apple"} -> ok; + {"Christopher Lenz", "dog food"} -> ok; + {"Noah Slater", "biggiesmalls endian"} -> ok; + {"Chris Anderson", "mp3"} -> ok; + {"Damien Katz", "pecan pie"} -> ok; + {_, _} -> throw({unauthorized, <<"Name or password is incorrect.">>}) + end, + Req#httpd{user_ctx = #user_ctx{name = ?l2b(Name)}}; + _ -> + % No X-Couch-Test-Auth credentials sent, give admin access so the + % previous authentication can be restored after the test + Req#httpd{user_ctx = ?ADMIN_USER} end. 
basic_name_pw(Req) -> AuthorizationHeader = header_value(Req, "Authorization"), case AuthorizationHeader of - "Basic " ++ Base64Value -> - try re:split(base64:decode(Base64Value), ":", - [{return, list}, {parts, 2}]) of - ["_", "_"] -> - % special name and pass to be logged out - nil; - [User, Pass] -> - {User, Pass}; + "Basic " ++ Base64Value -> + try + re:split( + base64:decode(Base64Value), + ":", + [{return, list}, {parts, 2}] + ) + of + ["_", "_"] -> + % special name and pass to be logged out + nil; + [User, Pass] -> + {User, Pass}; + _ -> + nil + catch + error:function_clause -> + throw({bad_request, "Authorization header has invalid base64 value"}) + end; _ -> nil - catch - error:function_clause -> - throw({bad_request, "Authorization header has invalid base64 value"}) - end; - _ -> - nil end. default_authentication_handler(Req) -> @@ -93,42 +104,47 @@ default_authentication_handler(Req) -> default_authentication_handler(Req, AuthModule) -> case basic_name_pw(Req) of - {User, Pass} -> - case AuthModule:get_user_creds(Req, User) of - nil -> - throw({unauthorized, <<"Name or password is incorrect.">>}); - {ok, UserProps, _AuthCtx} -> - reject_if_totp(UserProps), - UserName = ?l2b(User), - Password = ?l2b(Pass), - case authenticate(Password, UserProps) of - true -> - Req#httpd{user_ctx=#user_ctx{ - name=UserName, - roles=couch_util:get_value(<<"roles">>, UserProps, []) - }}; - false -> - authentication_warning(Req, UserName), - throw({unauthorized, <<"Name or password is incorrect.">>}) - end - end; - nil -> - case couch_server:has_admins() of - true -> - Req; - false -> - case chttpd_util:get_chttpd_auth_config_boolean( - "require_valid_user", false) of - true -> Req; - % If no admins, and no user required, then everyone is admin! - % Yay, admin party! - false -> Req#httpd{user_ctx=?ADMIN_USER} + {User, Pass} -> + case AuthModule:get_user_creds(Req, User) of + nil -> + throw({unauthorized, <<"Name or password is incorrect.">>}); + {ok, UserProps, _AuthCtx} -> + reject_if_totp(UserProps), + UserName = ?l2b(User), + Password = ?l2b(Pass), + case authenticate(Password, UserProps) of + true -> + Req#httpd{ + user_ctx = #user_ctx{ + name = UserName, + roles = couch_util:get_value(<<"roles">>, UserProps, []) + } + }; + false -> + authentication_warning(Req, UserName), + throw({unauthorized, <<"Name or password is incorrect.">>}) + end + end; + nil -> + case couch_server:has_admins() of + true -> + Req; + false -> + case + chttpd_util:get_chttpd_auth_config_boolean( + "require_valid_user", false + ) + of + true -> Req; + % If no admins, and no user required, then everyone is admin! + % Yay, admin party! + false -> Req#httpd{user_ctx = ?ADMIN_USER} + end end - end end. null_authentication_handler(Req) -> - Req#httpd{user_ctx=?ADMIN_USER}. + Req#httpd{user_ctx = ?ADMIN_USER}. %% @doc proxy auth handler. % @@ -155,39 +171,53 @@ proxy_authentication_handler(Req) -> %% @deprecated proxy_authentification_handler(Req) -> proxy_authentication_handler(Req). 
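basic_name_pw/1 at the top of this hunk is the Basic-auth decoder: strip the "Basic " prefix, base64-decode, split on the first colon. A reduced sketch of the same shape (hypothetical module; the real clause also maps the "_":"_" pair to a logout marker and throws {bad_request, ...} on bad base64):

```
-module(basic_auth_demo).
-export([name_pw/1]).

name_pw("Basic " ++ Base64Value) ->
    try re:split(base64:decode(Base64Value), ":", [{return, list}, {parts, 2}]) of
        [User, Pass] -> {User, Pass};
        _ -> nil
    catch
        %% base64:decode/1 raises on malformed input.
        error:_ -> invalid_base64
    end;
name_pw(_NotBasic) ->
    nil.
```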
- + proxy_auth_user(Req) -> XHeaderUserName = chttpd_util:get_chttpd_auth_config( - "x_auth_username", "X-Auth-CouchDB-UserName"), + "x_auth_username", "X-Auth-CouchDB-UserName" + ), XHeaderRoles = chttpd_util:get_chttpd_auth_config( - "x_auth_roles", "X-Auth-CouchDB-Roles"), + "x_auth_roles", "X-Auth-CouchDB-Roles" + ), XHeaderToken = chttpd_util:get_chttpd_auth_config( - "x_auth_token", "X-Auth-CouchDB-Token"), + "x_auth_token", "X-Auth-CouchDB-Token" + ), case header_value(Req, XHeaderUserName) of - undefined -> nil; + undefined -> + nil; UserName -> - Roles = case header_value(Req, XHeaderRoles) of - undefined -> []; - Else -> - [?l2b(R) || R <- string:tokens(Else, ",")] - end, - case chttpd_util:get_chttpd_auth_config_boolean( - "proxy_use_secret", false) of + Roles = + case header_value(Req, XHeaderRoles) of + undefined -> []; + Else -> [?l2b(R) || R <- string:tokens(Else, ",")] + end, + case + chttpd_util:get_chttpd_auth_config_boolean( + "proxy_use_secret", false + ) + of true -> case chttpd_util:get_chttpd_auth_config("secret") of undefined -> - Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}; + Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}}; Secret -> - ExpectedToken = couch_util:to_hex(couch_util:hmac(sha, Secret, UserName)), + ExpectedToken = couch_util:to_hex( + couch_util:hmac(sha, Secret, UserName) + ), case header_value(Req, XHeaderToken) of Token when Token == ExpectedToken -> - Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), - roles=Roles}}; - _ -> nil + Req#httpd{ + user_ctx = #user_ctx{ + name = ?l2b(UserName), + roles = Roles + } + }; + _ -> + nil end end; false -> - Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}} + Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}} end end. @@ -198,22 +228,35 @@ jwt_authentication_handler(Req) -> case jwtf:decode(?l2b(Jwt), [alg | RequiredClaims], fun jwtf_keystore:get/2) of {ok, {Claims}} -> case lists:keyfind(<<"sub">>, 1, Claims) of - false -> throw({unauthorized, <<"Token missing sub claim.">>}); - {_, User} -> Req#httpd{user_ctx=#user_ctx{ - name = User, - roles = couch_util:get_value(?l2b(config:get("jwt_auth", "roles_claim_name", "_couchdb.roles")), Claims, []) - }} + false -> + throw({unauthorized, <<"Token missing sub claim.">>}); + {_, User} -> + Req#httpd{ + user_ctx = #user_ctx{ + name = User, + roles = couch_util:get_value( + ?l2b( + config:get( + "jwt_auth", "roles_claim_name", "_couchdb.roles" + ) + ), + Claims, + [] + ) + } + } end; {error, Reason} -> throw(Reason) end; - _ -> Req + _ -> + Req end. get_configured_claims() -> Claims = config:get("jwt_auth", "required_claims", ""), Re = "((?<key1>[a-z]+)|{(?<key2>[a-z]+)\s*,\s*\"(?<val>[^\"]+)\"})", - case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of + case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of nomatch when Claims /= "" -> couch_log:error("[jwt_auth] required_claims is set to an invalid value.", []), throw({misconfigured_server, <<"JWT is not configured correctly">>}); @@ -231,61 +274,77 @@ to_claim([<<>>, Key, Value]) -> cookie_authentication_handler(Req) -> cookie_authentication_handler(Req, couch_auth_cache). 
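proxy_auth_user/1 earlier in this hunk accepts the X-Auth-CouchDB-Token header only when it equals the hex of HMAC-SHA1(Secret, UserName). A standalone sketch of that token math, with stdlib calls standing in for couch_util:hmac/3 and couch_util:to_hex/1 (assumed here to produce lowercase hex):

```
-module(proxy_token_demo).
-export([expected_token/2, verify/3]).

%% hex(HMAC-SHA1(Secret, UserName)) as a flat lowercase string.
expected_token(Secret, UserName) ->
    Mac = crypto:mac(hmac, sha, Secret, UserName),
    lists:flatten([io_lib:format("~2.16.0b", [B]) || <<B>> <= Mac]).

%% Plain =:= is not a constant-time comparison; fine for a sketch,
%% not for production token checks.
verify(Secret, UserName, Token) ->
    Token =:= expected_token(Secret, UserName).
```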
-cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req, AuthModule) -> +cookie_authentication_handler(#httpd{mochi_req = MochiReq} = Req, AuthModule) -> case MochiReq:get_cookie_value("AuthSession") of - undefined -> Req; - [] -> Req; - Cookie -> - [User, TimeStr, HashStr] = try - AuthSession = couch_util:decodeBase64Url(Cookie), - [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":", - [{return, list}, {parts, 3}]) - catch - _:_Error -> - Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>, - throw({bad_request, Reason}) - end, - % Verify expiry and hash - CurrentTime = make_cookie_time(), - case chttpd_util:get_chttpd_auth_config("secret") of undefined -> - couch_log:debug("cookie auth secret is not set",[]), Req; - SecretStr -> - Secret = ?l2b(SecretStr), - case AuthModule:get_user_creds(Req, User) of - nil -> Req; - {ok, UserProps, _AuthCtx} -> - UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>), - FullSecret = <<Secret/binary, UserSalt/binary>>, - ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr), - Hash = ?l2b(HashStr), - Timeout = chttpd_util:get_chttpd_auth_config_integer( - "timeout", 600), - couch_log:debug("timeout ~p", [Timeout]), - case (catch erlang:list_to_integer(TimeStr, 16)) of - TimeStamp when CurrentTime < TimeStamp + Timeout -> - case couch_passwords:verify(ExpectedHash, Hash) of - true -> - TimeLeft = TimeStamp + Timeout - CurrentTime, - couch_log:debug("Successful cookie auth as: ~p", - [User]), - Req#httpd{user_ctx=#user_ctx{ - name=?l2b(User), - roles=couch_util:get_value(<<"roles">>, UserProps, []) - }, auth={FullSecret, TimeLeft < Timeout*0.9}}; - _Else -> - Req - end; - _Else -> - Req - end + [] -> + Req; + Cookie -> + [User, TimeStr, HashStr] = + try + AuthSession = couch_util:decodeBase64Url(Cookie), + [_A, _B, _Cs] = re:split( + ?b2l(AuthSession), + ":", + [{return, list}, {parts, 3}] + ) + catch + _:_Error -> + Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>, + throw({bad_request, Reason}) + end, + % Verify expiry and hash + CurrentTime = make_cookie_time(), + case chttpd_util:get_chttpd_auth_config("secret") of + undefined -> + couch_log:debug("cookie auth secret is not set", []), + Req; + SecretStr -> + Secret = ?l2b(SecretStr), + case AuthModule:get_user_creds(Req, User) of + nil -> + Req; + {ok, UserProps, _AuthCtx} -> + UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>), + FullSecret = <<Secret/binary, UserSalt/binary>>, + ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr), + Hash = ?l2b(HashStr), + Timeout = chttpd_util:get_chttpd_auth_config_integer( + "timeout", 600 + ), + couch_log:debug("timeout ~p", [Timeout]), + case (catch erlang:list_to_integer(TimeStr, 16)) of + TimeStamp when CurrentTime < TimeStamp + Timeout -> + case couch_passwords:verify(ExpectedHash, Hash) of + true -> + TimeLeft = TimeStamp + Timeout - CurrentTime, + couch_log:debug( + "Successful cookie auth as: ~p", + [User] + ), + Req#httpd{ + user_ctx = #user_ctx{ + name = ?l2b(User), + roles = couch_util:get_value( + <<"roles">>, UserProps, [] + ) + }, + auth = {FullSecret, TimeLeft < Timeout * 0.9} + }; + _Else -> + Req + end; + _Else -> + Req + end + end end - end end. 
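The auth field set on success above carries a refresh flag: TimeLeft < Timeout * 0.9 turns true once more than 10% of the session lifetime has elapsed, and the cookie_auth_header/2 clause in the next hunk re-issues the cookie only when that flag is set. The arithmetic in isolation:

```
%% TimeStamp is the issue time decoded from the cookie (hex seconds),
%% Timeout the configured session length in seconds, Now the current time.
%% With Timeout = 600 this turns true 60 seconds after issue.
should_refresh(TimeStamp, Timeout, Now) ->
    TimeLeft = TimeStamp + Timeout - Now,
    TimeLeft < Timeout * 0.9.
```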
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> []; -cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) -> +cookie_auth_header(#httpd{user_ctx = #user_ctx{name = null}}, _Headers) -> + []; +cookie_auth_header(#httpd{user_ctx = #user_ctx{name = User}, auth = {Secret, true}} = Req, Headers) -> % Note: we only set the AuthSession cookie if: % * a valid AuthSession cookie has been received % * we are outside a 10% timeout window @@ -296,20 +355,24 @@ cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Re CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""), Cookies = mochiweb_cookies:parse_cookie(CookieHeader), AuthSession = couch_util:get_value("AuthSession", Cookies), - if AuthSession == undefined -> - TimeStamp = make_cookie_time(), - [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)]; - true -> - [] + if + AuthSession == undefined -> + TimeStamp = make_cookie_time(), + [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)]; + true -> + [] end; -cookie_auth_header(_Req, _Headers) -> []. +cookie_auth_header(_Req, _Headers) -> + []. cookie_auth_cookie(Req, User, Secret, TimeStamp) -> SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16), Hash = couch_util:hmac(sha, Secret, SessionData), - mochiweb_cookies:cookie("AuthSession", + mochiweb_cookies:cookie( + "AuthSession", couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)), - [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site()). + [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site() + ). ensure_cookie_auth_secret() -> case chttpd_util:get_chttpd_auth_config("secret") of @@ -317,7 +380,8 @@ ensure_cookie_auth_secret() -> NewSecret = ?b2l(couch_uuids:random()), config:set("chttpd_auth", "secret", NewSecret), NewSecret; - Secret -> Secret + Secret -> + Secret end. % session handlers @@ -325,27 +389,32 @@ ensure_cookie_auth_secret() -> handle_session_req(Req) -> handle_session_req(Req, couch_auth_cache). 
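cookie_auth_cookie/4 above packs "User:HexTime" plus the raw HMAC bytes into a single base64url value. A self-contained sketch of that encoding, with a hand-rolled URL-safe base64 standing in for couch_util:encodeBase64Url/1:

```
-module(auth_session_demo).
-export([cookie_value/3]).

%% base64url("User:HexTime:Hash") where Hash is the raw
%% HMAC-SHA1(Secret, "User:HexTime").
cookie_value(User, Secret, TimeStamp) ->
    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
    Hash = crypto:mac(hmac, sha, Secret, SessionData),
    b64url(iolist_to_binary([SessionData, ":", Hash])).

%% URL-safe alphabet, padding stripped.
b64url(Bin) ->
    << <<(urlsafe(C))>> || <<C>> <= base64:encode(Bin), C =/= $= >>.

urlsafe($+) -> $-;
urlsafe($/) -> $_;
urlsafe(C) -> C.
```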
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) -> +handle_session_req(#httpd{method = 'POST', mochi_req = MochiReq} = Req, AuthModule) -> ReqBody = MochiReq:recv_body(), - Form = case MochiReq:get_primary_header_value("content-type") of - % content type should be json - "application/x-www-form-urlencoded" ++ _ -> - mochiweb_util:parse_qs(ReqBody); - "application/json" ++ _ -> - {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)), - lists:map(fun({Key, Value}) -> - {?b2l(Key), ?b2l(Value)} - end, Pairs); - _ -> - [] - end, + Form = + case MochiReq:get_primary_header_value("content-type") of + % content type should be json + "application/x-www-form-urlencoded" ++ _ -> + mochiweb_util:parse_qs(ReqBody); + "application/json" ++ _ -> + {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)), + lists:map( + fun({Key, Value}) -> + {?b2l(Key), ?b2l(Value)} + end, + Pairs + ); + _ -> + [] + end, UserName = ?l2b(extract_username(Form)), Password = ?l2b(couch_util:get_value("password", Form, "")), - couch_log:debug("Attempt Login: ~s",[UserName]), - {ok, UserProps, _AuthCtx} = case AuthModule:get_user_creds(Req, UserName) of - nil -> {ok, [], nil}; - Result -> Result - end, + couch_log:debug("Attempt Login: ~s", [UserName]), + {ok, UserProps, _AuthCtx} = + case AuthModule:get_user_creds(Req, UserName) of + nil -> {ok, [], nil}; + Result -> Result + end, case authenticate(Password, UserProps) of true -> verify_totp(UserProps, Form), @@ -353,68 +422,102 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) -> Secret = ?l2b(ensure_cookie_auth_secret()), UserSalt = couch_util:get_value(<<"salt">>, UserProps), CurrentTime = make_cookie_time(), - Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime), + Cookie = cookie_auth_cookie( + Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime + ), % TODO document the "next" feature in Futon - {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of - nil -> - {200, [Cookie]}; - Redirect -> - {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]} - end, - send_json(Req#httpd{req_body=ReqBody}, Code, Headers, + {Code, Headers} = + case couch_httpd:qs_value(Req, "next", nil) of + nil -> + {200, [Cookie]}; + Redirect -> + {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]} + end, + send_json( + Req#httpd{req_body = ReqBody}, + Code, + Headers, {[ {ok, true}, {name, UserName}, {roles, couch_util:get_value(<<"roles">>, UserProps, [])} - ]}); + ]} + ); false -> authentication_warning(Req, UserName), % clear the session - Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)), - {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of - nil -> - {401, [Cookie]}; - Redirect -> - {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]} - end, - send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]}) + Cookie = mochiweb_cookies:cookie( + "AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req) + ), + {Code, Headers} = + case couch_httpd:qs_value(Req, "fail", nil) of + nil -> + {401, [Cookie]}; + Redirect -> + {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]} + end, + send_json( + Req, + Code, + Headers, + {[{error, <<"unauthorized">>}, {reason, <<"Name or password is incorrect.">>}]} + ) end; % get user info % GET /_session -handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req, 
_AuthModule) -> +handle_session_req(#httpd{method = 'GET', user_ctx = UserCtx} = Req, _AuthModule) -> Name = UserCtx#user_ctx.name, ForceLogin = couch_httpd:qs_value(Req, "basic", "false"), case {Name, ForceLogin} of {null, "true"} -> throw({unauthorized, <<"Please login.">>}); {Name, _} -> - send_json(Req, {[ - % remove this ok - {ok, true}, - {<<"userCtx">>, {[ - {name, Name}, - {roles, UserCtx#user_ctx.roles} - ]}}, - {info, {[ - {authentication_handlers, [ - N || {N, _Fun} <- Req#httpd.authentication_handlers]} - ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) -> - Handler - end) ++ maybe_value(authentication_db, config:get("chttpd_auth", "authentication_db"), fun(Val) -> - ?l2b(Val) - end)}} - ]}) + send_json( + Req, + {[ + % remove this ok + {ok, true}, + {<<"userCtx">>, + {[ + {name, Name}, + {roles, UserCtx#user_ctx.roles} + ]}}, + {info, { + [ + {authentication_handlers, [ + N + || {N, _Fun} <- Req#httpd.authentication_handlers + ]} + ] ++ + maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) -> + Handler + end) ++ + maybe_value( + authentication_db, + config:get("chttpd_auth", "authentication_db"), + fun(Val) -> + ?l2b(Val) + end + ) + }} + ]} + ) end; % logout by deleting the session -handle_session_req(#httpd{method='DELETE'}=Req, _AuthModule) -> - Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ - cookie_domain() ++ cookie_scheme(Req)), - {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of - nil -> - {200, [Cookie]}; - Redirect -> - {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]} - end, +handle_session_req(#httpd{method = 'DELETE'} = Req, _AuthModule) -> + Cookie = mochiweb_cookies:cookie( + "AuthSession", + "", + [{path, "/"}] ++ + cookie_domain() ++ cookie_scheme(Req) + ), + {Code, Headers} = + case couch_httpd:qs_value(Req, "next", nil) of + nil -> + {200, [Cookie]}; + Redirect -> + {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]} + end, send_json(Req, Code, Headers, {[{ok, true}]}); handle_session_req(Req, _AuthModule) -> send_method_not_allowed(Req, "GET,HEAD,POST,DELETE"). @@ -433,22 +536,25 @@ extract_username(Form) -> end. maybe_value(_Key, undefined, _Fun) -> []; -maybe_value(Key, Else, Fun) -> - [{Key, Fun(Else)}]. +maybe_value(Key, Else, Fun) -> [{Key, Fun(Else)}]. authenticate(Pass, UserProps) -> UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>), {PasswordHash, ExpectedHash} = case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of - <<"simple">> -> - {couch_passwords:simple(Pass, UserSalt), - couch_util:get_value(<<"password_sha">>, UserProps, nil)}; - <<"pbkdf2">> -> - Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000), - verify_iterations(Iterations), - {couch_passwords:pbkdf2(Pass, UserSalt, Iterations), - couch_util:get_value(<<"derived_key">>, UserProps, nil)} - end, + <<"simple">> -> + { + couch_passwords:simple(Pass, UserSalt), + couch_util:get_value(<<"password_sha">>, UserProps, nil) + }; + <<"pbkdf2">> -> + Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000), + verify_iterations(Iterations), + { + couch_passwords:pbkdf2(Pass, UserSalt, Iterations), + couch_util:get_value(<<"derived_key">>, UserProps, nil) + } + end, couch_passwords:verify(PasswordHash, ExpectedHash). verify_iterations(Iterations) when is_integer(Iterations) -> @@ -471,21 +577,25 @@ make_cookie_time() -> {NowMS, NowS, _} = os:timestamp(), NowMS * 1000000 + NowS. 
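authenticate/2 in this hunk ends with couch_passwords:verify/2 rather than =:=, so the hash comparison does not exit early at the first mismatching byte and response timing leaks nothing about hash prefixes. A typical constant-time compare has this shape (a sketch, not CouchDB's implementation):

```
%% XOR-accumulate every byte pair; the work done depends only on the
%% input length, never on where the first mismatch sits.
const_compare(A, B) when byte_size(A) =:= byte_size(B) ->
    const_compare(A, B, 0);
const_compare(_, _) ->
    false.

const_compare(<<>>, <<>>, Acc) ->
    Acc =:= 0;
const_compare(<<X, RestA/binary>>, <<Y, RestB/binary>>, Acc) ->
    const_compare(RestA, RestB, Acc bor (X bxor Y)).
```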
-cookie_scheme(#httpd{mochi_req=MochiReq}) -> +cookie_scheme(#httpd{mochi_req = MochiReq}) -> [{http_only, true}] ++ - case MochiReq:get(scheme) of - http -> []; - https -> [{secure, true}] - end. + case MochiReq:get(scheme) of + http -> []; + https -> [{secure, true}] + end. max_age() -> - case chttpd_util:get_chttpd_auth_config_boolean( - "allow_persistent_cookies", true) of + case + chttpd_util:get_chttpd_auth_config_boolean( + "allow_persistent_cookies", true + ) + of false -> []; true -> Timeout = chttpd_util:get_chttpd_auth_config_integer( - "timeout", 600), + "timeout", 600 + ), [{max_age, Timeout}] end. @@ -496,20 +606,22 @@ cookie_domain() -> _ -> [{domain, Domain}] end. - same_site() -> SameSite = chttpd_util:get_chttpd_auth_config("same_site", ""), case string:to_lower(SameSite) of - "" -> []; - "none" -> [{same_site, none}]; - "lax" -> [{same_site, lax}]; - "strict" -> [{same_site, strict}]; + "" -> + []; + "none" -> + [{same_site, none}]; + "lax" -> + [{same_site, lax}]; + "strict" -> + [{same_site, strict}]; _ -> - couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ",[SameSite]), + couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ", [SameSite]), [] end. - reject_if_totp(User) -> case get_totp_config(User) of undefined -> @@ -525,7 +637,8 @@ verify_totp(User, Form) -> {Props} -> Key = couch_base32:decode(couch_util:get_value(<<"key">>, Props)), Alg = couch_util:to_existing_atom( - couch_util:get_value(<<"algorithm">>, Props, <<"sha">>)), + couch_util:get_value(<<"algorithm">>, Props, <<"sha">>) + ), Len = couch_util:get_value(<<"length">>, Props, 6), Token = ?l2b(couch_util:get_value("token", Form, "")), verify_token(Alg, Key, Len, Token) @@ -536,12 +649,17 @@ get_totp_config(User) -> verify_token(Alg, Key, Len, Token) -> Now = make_cookie_time(), - Tokens = [generate_token(Alg, Key, Len, Now - 30), - generate_token(Alg, Key, Len, Now), - generate_token(Alg, Key, Len, Now + 30)], + Tokens = [ + generate_token(Alg, Key, Len, Now - 30), + generate_token(Alg, Key, Len, Now), + generate_token(Alg, Key, Len, Now + 30) + ], %% evaluate all tokens in constant time - Match = lists:foldl(fun(T, Acc) -> couch_util:verify(T, Token) or Acc end, - false, Tokens), + Match = lists:foldl( + fun(T, Acc) -> couch_util:verify(T, Token) or Acc end, + false, + Tokens + ), case Match of true -> ok; @@ -553,17 +671,20 @@ generate_token(Alg, Key, Len, Timestamp) -> integer_to_binary(couch_totp:generate(Alg, Key, Timestamp, 30, Len), Len). integer_to_binary(Int, Len) when is_integer(Int), is_integer(Len) -> - Unpadded = case erlang:function_exported(erlang, integer_to_binary, 1) of - true -> - erlang:integer_to_binary(Int); - false -> - ?l2b(integer_to_list(Int)) - end, + Unpadded = + case erlang:function_exported(erlang, integer_to_binary, 1) of + true -> + erlang:integer_to_binary(Int); + false -> + ?l2b(integer_to_list(Int)) + end, Padding = binary:copy(<<"0">>, Len), Padded = <<Padding/binary, Unpadded/binary>>, binary:part(Padded, byte_size(Padded), -Len). authentication_warning(#httpd{mochi_req = Req}, User) -> Peer = Req:get(peer), - couch_log:warning("~p: Authentication failed for user ~s from ~s", - [?MODULE, User, Peer]). + couch_log:warning( + "~p: Authentication failed for user ~s from ~s", + [?MODULE, User, Peer] + ). 
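Two details at the end of this module are easy to miss: verify_token/4 folds over the Now - 30, Now and Now + 30 candidates so all three are always evaluated, and integer_to_binary/2 left-pads the TOTP code by prepending Len zeros and keeping the last Len bytes. The padding trick on its own:

```
%% pad_code(42, 6) -> <<"000042">>; codes longer than Len keep only
%% their last Len digits via binary:part/3 with a negative length.
pad_code(Int, Len) ->
    Unpadded = integer_to_binary(Int),
    Padded = <<(binary:copy(<<"0">>, Len))/binary, Unpadded/binary>>,
    binary:part(Padded, byte_size(Padded), -Len).
```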
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl index 2418c1a4c..e82eea7f7 100644 --- a/src/couch/src/couch_httpd_db.erl +++ b/src/couch/src/couch_httpd_db.erl @@ -16,17 +16,36 @@ -include_lib("couch/include/couch_db.hrl"). --export([handle_request/1, handle_compact_req/2, handle_design_req/2, - db_req/2, couch_doc_open/4, handle_db_changes_req/2, +-export([ + handle_request/1, + handle_compact_req/2, + handle_design_req/2, + db_req/2, + couch_doc_open/4, + handle_db_changes_req/2, update_doc_result_to_json/1, update_doc_result_to_json/2, - handle_design_info_req/3, parse_copy_destination_header/1, - parse_changes_query/2, handle_changes_req/4]). - --import(couch_httpd, - [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2, - start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1, - start_chunked_response/3, absolute_uri/2, send/2, - start_response_length/4, send_error/4]). + handle_design_info_req/3, + parse_copy_destination_header/1, + parse_changes_query/2, + handle_changes_req/4 +]). + +-import( + couch_httpd, + [ + send_json/2, send_json/3, send_json/4, + send_method_not_allowed/2, + start_json_response/2, + send_chunk/2, + last_chunk/1, + end_json_response/1, + start_chunked_response/3, + absolute_uri/2, + send/2, + start_response_length/4, + send_error/4 + ] +). -record(doc_query_args, { options = [], @@ -37,134 +56,148 @@ }). % Database request handlers -handle_request(#httpd{path_parts=[DbName|RestParts],method=Method, - db_url_handlers=DbUrlHandlers}=Req)-> +handle_request( + #httpd{ + path_parts = [DbName | RestParts], + method = Method, + db_url_handlers = DbUrlHandlers + } = Req +) -> case {Method, RestParts} of - {'PUT', []} -> - create_db_req(Req, DbName); - {'DELETE', []} -> - % if we get ?rev=... the user is using a faulty script where the - % document id is empty by accident. Let them recover safely. - case couch_httpd:qs_value(Req, "rev", false) of - false -> delete_db_req(Req, DbName); - _Rev -> throw({bad_request, - "You tried to DELETE a database with a ?rev= parameter. " - ++ "Did you mean to DELETE a document instead?"}) - end; - {_, []} -> - do_db_req(Req, fun db_req/2); - {_, [SecondPart|_]} -> - Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2), - do_db_req(Req, Handler) + {'PUT', []} -> + create_db_req(Req, DbName); + {'DELETE', []} -> + % if we get ?rev=... the user is using a faulty script where the + % document id is empty by accident. Let them recover safely. + case couch_httpd:qs_value(Req, "rev", false) of + false -> + delete_db_req(Req, DbName); + _Rev -> + throw( + {bad_request, + "You tried to DELETE a database with a ?rev= parameter. " ++ + "Did you mean to DELETE a document instead?"} + ) + end; + {_, []} -> + do_db_req(Req, fun db_req/2); + {_, [SecondPart | _]} -> + Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2), + do_db_req(Req, Handler) end. - handle_db_changes_req(Req, Db) -> ChangesArgs = parse_changes_query(Req, Db), ChangesFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db), handle_changes_req(Req, Db, ChangesArgs, ChangesFun). 
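handle_request/1 above routes on the second path segment via couch_util:dict_find/3, falling back to the generic db_req/2 when nothing is registered. With the stdlib dict module, that lookup-with-default amounts to (sketch):

```
%% Return the handler registered under Key, or Default when absent.
find_handler(Key, Handlers, Default) ->
    case dict:find(Key, Handlers) of
        {ok, Handler} -> Handler;
        error -> Default
    end.
```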
- -handle_changes_req(#httpd{method='POST'}=Req, Db, ChangesArgs, ChangesFun) -> +handle_changes_req(#httpd{method = 'POST'} = Req, Db, ChangesArgs, ChangesFun) -> couch_httpd:validate_ctype(Req, "application/json"), handle_changes_req1(Req, Db, ChangesArgs, ChangesFun); -handle_changes_req(#httpd{method='GET'}=Req, Db, ChangesArgs, ChangesFun) -> +handle_changes_req(#httpd{method = 'GET'} = Req, Db, ChangesArgs, ChangesFun) -> handle_changes_req1(Req, Db, ChangesArgs, ChangesFun); -handle_changes_req(#httpd{}=Req, _Db, _ChangesArgs, _ChangesFun) -> +handle_changes_req(#httpd{} = Req, _Db, _ChangesArgs, _ChangesFun) -> couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST"). handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) -> DbName = couch_db:name(Db), AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")), case AuthDbName of - DbName -> - % in the authentication database, _changes is admin-only. - ok = couch_db:check_is_admin(Db); - _Else -> - % on other databases, _changes is free for all. - ok + DbName -> + % in the authentication database, _changes is admin-only. + ok = couch_db:check_is_admin(Db); + _Else -> + % on other databases, _changes is free for all. + ok end, MakeCallback = fun(Resp) -> - fun({change, {ChangeProp}=Change, _}, "eventsource") -> - Seq = proplists:get_value(<<"seq">>, ChangeProp), - couch_httpd:send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change), - "\n", "id: ", ?JSON_ENCODE(Seq), - "\n\n"]); - ({change, Change, _}, "continuous") -> - couch_httpd:send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]); - ({change, Change, Prepend}, _) -> - couch_httpd:send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]); - (start, "eventsource") -> - ok; - (start, "continuous") -> - ok; - (start, _) -> - couch_httpd:send_chunk(Resp, "{\"results\":[\n"); - ({stop, _EndSeq}, "eventsource") -> - couch_httpd:end_json_response(Resp); - ({stop, EndSeq}, "continuous") -> - couch_httpd:send_chunk( - Resp, - [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"] - ), - couch_httpd:end_json_response(Resp); - ({stop, EndSeq}, _) -> - couch_httpd:send_chunk( - Resp, - io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq]) - ), - couch_httpd:end_json_response(Resp); - (timeout, "eventsource") -> - couch_httpd:send_chunk(Resp, "event: heartbeat\ndata: \n\n"); - (timeout, _) -> - couch_httpd:send_chunk(Resp, "\n") + fun + ({change, {ChangeProp} = Change, _}, "eventsource") -> + Seq = proplists:get_value(<<"seq">>, ChangeProp), + couch_httpd:send_chunk(Resp, [ + "data: ", + ?JSON_ENCODE(Change), + "\n", + "id: ", + ?JSON_ENCODE(Seq), + "\n\n" + ]); + ({change, Change, _}, "continuous") -> + couch_httpd:send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]); + ({change, Change, Prepend}, _) -> + couch_httpd:send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]); + (start, "eventsource") -> + ok; + (start, "continuous") -> + ok; + (start, _) -> + couch_httpd:send_chunk(Resp, "{\"results\":[\n"); + ({stop, _EndSeq}, "eventsource") -> + couch_httpd:end_json_response(Resp); + ({stop, EndSeq}, "continuous") -> + couch_httpd:send_chunk( + Resp, + [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"] + ), + couch_httpd:end_json_response(Resp); + ({stop, EndSeq}, _) -> + couch_httpd:send_chunk( + Resp, + io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq]) + ), + couch_httpd:end_json_response(Resp); + (timeout, "eventsource") -> + couch_httpd:send_chunk(Resp, "event: heartbeat\ndata: \n\n"); + (timeout, _) -> + couch_httpd:send_chunk(Resp, "\n") end end, - WrapperFun = case ChangesArgs#changes_args.feed of - 
"normal" -> - {ok, Info} = couch_db:get_db_info(Db), - CurrentEtag = couch_httpd:make_etag(Info), - fun(FeedChangesFun) -> - couch_httpd:etag_respond( - Req, - CurrentEtag, - fun() -> - {ok, Resp} = couch_httpd:start_json_response( - Req, 200, [{"ETag", CurrentEtag}] - ), + WrapperFun = + case ChangesArgs#changes_args.feed of + "normal" -> + {ok, Info} = couch_db:get_db_info(Db), + CurrentEtag = couch_httpd:make_etag(Info), + fun(FeedChangesFun) -> + couch_httpd:etag_respond( + Req, + CurrentEtag, + fun() -> + {ok, Resp} = couch_httpd:start_json_response( + Req, 200, [{"ETag", CurrentEtag}] + ), + FeedChangesFun(MakeCallback(Resp)) + end + ) + end; + "eventsource" -> + Headers = [ + {"Content-Type", "text/event-stream"}, + {"Cache-Control", "no-cache"} + ], + {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers), + fun(FeedChangesFun) -> + FeedChangesFun(MakeCallback(Resp)) + end; + _ -> + % "longpoll" or "continuous" + {ok, Resp} = couch_httpd:start_json_response(Req, 200), + fun(FeedChangesFun) -> FeedChangesFun(MakeCallback(Resp)) end - ) - end; - "eventsource" -> - Headers = [ - {"Content-Type", "text/event-stream"}, - {"Cache-Control", "no-cache"} - ], - {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers), - fun(FeedChangesFun) -> - FeedChangesFun(MakeCallback(Resp)) - end; - _ -> - % "longpoll" or "continuous" - {ok, Resp} = couch_httpd:start_json_response(Req, 200), - fun(FeedChangesFun) -> - FeedChangesFun(MakeCallback(Resp)) - end - end, + end, couch_stats:increment_counter( - [couchdb, httpd, clients_requesting_changes]), + [couchdb, httpd, clients_requesting_changes] + ), try WrapperFun(ChangesFun) after couch_stats:decrement_counter( - [couchdb, httpd, clients_requesting_changes]) + [couchdb, httpd, clients_requesting_changes] + ) end. - - -handle_compact_req(#httpd{method='POST'}=Req, Db) -> +handle_compact_req(#httpd{method = 'POST'} = Req, Db) -> case Req#httpd.path_parts of [_DbName, <<"_compact">>] -> ok = couch_db:check_is_admin(Db), @@ -179,24 +212,30 @@ handle_compact_req(#httpd{method='POST'}=Req, Db) -> ), couch_mrview_http:handle_compact_req(Req, Db, DDoc) end; - handle_compact_req(Req, _Db) -> send_method_not_allowed(Req, "POST"). - -handle_design_req(#httpd{ - path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest], +handle_design_req( + #httpd{ + path_parts = [_DbName, _Design, DesignName, <<"_", _/binary>> = Action | _Rest], design_url_handlers = DesignUrlHandlers - }=Req, Db) -> + } = Req, + Db +) -> case couch_db:is_system_db(Db) of - true -> - case (catch couch_db:check_is_admin(Db)) of - ok -> ok; - _ -> - throw({forbidden, <<"Only admins can access design document", - " actions for system databases.">>}) - end; - false -> ok + true -> + case (catch couch_db:check_is_admin(Db)) of + ok -> + ok; + _ -> + throw( + {forbidden, + <<"Only admins can access design document", + " actions for system databases.">>} + ) + end; + false -> + ok end, % maybe load ddoc through fabric @@ -212,295 +251,315 @@ handle_design_req(#httpd{ throw({not_found, <<"missing handler: ", Action/binary>>}) end), Handler(Req, Db, DDoc); - handle_design_req(Req, Db) -> db_req(Req, Db). 
-handle_design_info_req(#httpd{ - method='GET', - path_parts=[_DbName, _Design, DesignName, _] - }=Req, Db, _DDoc) -> +handle_design_info_req( + #httpd{ + method = 'GET', + path_parts = [_DbName, _Design, DesignName, _] + } = Req, + Db, + _DDoc +) -> DesignId = <<"_design/", DesignName/binary>>, DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]), couch_mrview_http:handle_info_req(Req, Db, DDoc). -create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) -> +create_db_req(#httpd{user_ctx = UserCtx} = Req, DbName) -> ok = couch_httpd:verify_is_server_admin(Req), - Engine = case couch_httpd:qs_value(Req, "engine") of - EngineStr when is_list(EngineStr) -> - [{engine, iolist_to_binary(EngineStr)}]; - _ -> - [] - end, + Engine = + case couch_httpd:qs_value(Req, "engine") of + EngineStr when is_list(EngineStr) -> + [{engine, iolist_to_binary(EngineStr)}]; + _ -> + [] + end, case couch_server:create(DbName, [{user_ctx, UserCtx}] ++ Engine) of - {ok, Db} -> - couch_db:close(Db), - DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)), - send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]}); - Error -> - throw(Error) + {ok, Db} -> + couch_db:close(Db), + DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)), + send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]}); + Error -> + throw(Error) end. -delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) -> +delete_db_req(#httpd{user_ctx = UserCtx} = Req, DbName) -> ok = couch_httpd:verify_is_server_admin(Req), - Options = case couch_httpd:qs_value(Req, "sync") of - "true" -> [sync, {user_ctx, UserCtx}]; - _ -> [{user_ctx, UserCtx}] - end, + Options = + case couch_httpd:qs_value(Req, "sync") of + "true" -> [sync, {user_ctx, UserCtx}]; + _ -> [{user_ctx, UserCtx}] + end, case couch_server:delete(DbName, Options) of - ok -> - send_json(Req, 200, {[{ok, true}]}); - Error -> - throw(Error) + ok -> + send_json(Req, 200, {[{ok, true}]}); + Error -> + throw(Error) end. -do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) -> +do_db_req(#httpd{user_ctx = UserCtx, path_parts = [DbName | _]} = Req, Fun) -> case couch_db:open(DbName, [{user_ctx, UserCtx}]) of - {ok, Db} -> - try - Fun(Req, Db) - after - catch couch_db:close(Db) - end; - Error -> - throw(Error) + {ok, Db} -> + try + Fun(Req, Db) + after + catch couch_db:close(Db) + end; + Error -> + throw(Error) end. 
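do_db_req/2 above is the open/use/close pattern behind every database request: close runs in an after clause so it happens on success and on throw alike, and is itself wrapped in catch so a failing close cannot mask the handler's result or error. Reduced to a one-argument callback (sketch):

```
with_db(DbName, UserCtx, Fun) ->
    case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
        {ok, Db} ->
            try
                Fun(Db)
            after
                %% catch: a crash while closing must not override
                %% whatever Fun returned or threw.
                catch couch_db:close(Db)
            end;
        Error ->
            throw(Error)
    end.
```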
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) -> +db_req(#httpd{method = 'GET', path_parts = [_DbName]} = Req, Db) -> {ok, DbInfo} = couch_db:get_db_info(Db), send_json(Req, {DbInfo}); - -db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) -> +db_req(#httpd{method = 'POST', path_parts = [_DbName]} = Req, Db) -> couch_httpd:validate_ctype(Req, "application/json"), Doc = couch_db:doc_from_json_obj_validate(Db, couch_httpd:json_body(Req)), validate_attachment_names(Doc), - Doc2 = case Doc#doc.id of - <<"">> -> - Doc#doc{id=couch_uuids:new(), revs={0, []}}; - _ -> - Doc - end, + Doc2 = + case Doc#doc.id of + <<"">> -> + Doc#doc{id = couch_uuids:new(), revs = {0, []}}; + _ -> + Doc + end, DocId = Doc2#doc.id, update_doc(Req, Db, DocId, Doc2); - -db_req(#httpd{path_parts=[_DbName]}=Req, _Db) -> +db_req(#httpd{path_parts = [_DbName]} = Req, _Db) -> send_method_not_allowed(Req, "DELETE,GET,HEAD,POST"); - -db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) -> +db_req(#httpd{method = 'POST', path_parts = [_, <<"_ensure_full_commit">>]} = Req, Db) -> couch_httpd:validate_ctype(Req, "application/json"), _ = couch_httpd:body(Req), StartTime = couch_db:get_instance_start_time(Db), - send_json(Req, 201, {[ - {ok, true}, - {instance_start_time, StartTime} - ]}); - -db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) -> + send_json( + Req, + 201, + {[ + {ok, true}, + {instance_start_time, StartTime} + ]} + ); +db_req(#httpd{path_parts = [_, <<"_ensure_full_commit">>]} = Req, _Db) -> send_method_not_allowed(Req, "POST"); - -db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) -> +db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>]} = Req, Db) -> couch_stats:increment_counter([couchdb, httpd, bulk_requests]), couch_httpd:validate_ctype(Req, "application/json"), {JsonProps} = couch_httpd:json_body_obj(Req), case couch_util:get_value(<<"docs">>, JsonProps) of - undefined -> - send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>); - DocsArray -> - couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)), - case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of - "true" -> - Options = [full_commit]; - "false" -> - Options = [delay_commit]; - _ -> - Options = [] - end, - case couch_util:get_value(<<"new_edits">>, JsonProps, true) of - true -> - Docs = lists:map( - fun({ObjProps} = JsonObj) -> - Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj), - validate_attachment_names(Doc), - Id = case Doc#doc.id of - <<>> -> couch_uuids:new(); - Id0 -> Id0 - end, - case couch_util:get_value(<<"_rev">>, ObjProps) of - undefined -> - Revs = {0, []}; - Rev -> - {Pos, RevId} = couch_doc:parse_rev(Rev), - Revs = {Pos, [RevId]} - end, - Doc#doc{id=Id,revs=Revs} - end, - DocsArray), - Options2 = - case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of - true -> [all_or_nothing|Options]; - _ -> Options + undefined -> + send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>); + DocsArray -> + couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)), + case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of + "true" -> + Options = [full_commit]; + "false" -> + Options = [delay_commit]; + _ -> + Options = [] end, - case couch_db:update_docs(Db, Docs, Options2) of - {ok, Results} -> - % output the results - DocResults = lists:zipwith(fun update_doc_result_to_json/2, - Docs, Results), - send_json(Req, 201, DocResults); - {aborted, Errors} -> - 
ErrorsJson = - lists:map(fun update_doc_result_to_json/1, Errors), - send_json(Req, 417, ErrorsJson) - end; - false -> - Docs = lists:map(fun(JsonObj) -> - Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj), - validate_attachment_names(Doc), - Doc - end, DocsArray), - {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes), - ErrorsJson = - lists:map(fun update_doc_result_to_json/1, Errors), - send_json(Req, 201, ErrorsJson) - end + case couch_util:get_value(<<"new_edits">>, JsonProps, true) of + true -> + Docs = lists:map( + fun({ObjProps} = JsonObj) -> + Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj), + validate_attachment_names(Doc), + Id = + case Doc#doc.id of + <<>> -> couch_uuids:new(); + Id0 -> Id0 + end, + case couch_util:get_value(<<"_rev">>, ObjProps) of + undefined -> + Revs = {0, []}; + Rev -> + {Pos, RevId} = couch_doc:parse_rev(Rev), + Revs = {Pos, [RevId]} + end, + Doc#doc{id = Id, revs = Revs} + end, + DocsArray + ), + Options2 = + case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of + true -> [all_or_nothing | Options]; + _ -> Options + end, + case couch_db:update_docs(Db, Docs, Options2) of + {ok, Results} -> + % output the results + DocResults = lists:zipwith( + fun update_doc_result_to_json/2, + Docs, + Results + ), + send_json(Req, 201, DocResults); + {aborted, Errors} -> + ErrorsJson = + lists:map(fun update_doc_result_to_json/1, Errors), + send_json(Req, 417, ErrorsJson) + end; + false -> + Docs = lists:map( + fun(JsonObj) -> + Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj), + validate_attachment_names(Doc), + Doc + end, + DocsArray + ), + {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes), + ErrorsJson = + lists:map(fun update_doc_result_to_json/1, Errors), + send_json(Req, 201, ErrorsJson) + end end; -db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) -> +db_req(#httpd{path_parts = [_, <<"_bulk_docs">>]} = Req, _Db) -> send_method_not_allowed(Req, "POST"); - -db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) -> +db_req(#httpd{method = 'POST', path_parts = [_, <<"_purge">>]} = Req, Db) -> couch_stats:increment_counter([couchdb, httpd, purge_requests]), couch_httpd:validate_ctype(Req, "application/json"), {IdRevs} = couch_httpd:json_body_obj(Req), - PurgeReqs = lists:map(fun({Id, JsonRevs}) -> - {couch_uuids:new(), Id, couch_doc:parse_revs(JsonRevs)} - end, IdRevs), + PurgeReqs = lists:map( + fun({Id, JsonRevs}) -> + {couch_uuids:new(), Id, couch_doc:parse_revs(JsonRevs)} + end, + IdRevs + ), {ok, Replies} = couch_db:purge_docs(Db, PurgeReqs), - Results = lists:zipwith(fun({Id, _}, {ok, Reply}) -> - {Id, couch_doc:revs_to_strs(Reply)} - end, IdRevs, Replies), + Results = lists:zipwith( + fun({Id, _}, {ok, Reply}) -> + {Id, couch_doc:revs_to_strs(Reply)} + end, + IdRevs, + Replies + ), {ok, Db2} = couch_db:reopen(Db), PurgeSeq = couch_db:get_purge_seq(Db2), send_json(Req, 200, {[{purge_seq, PurgeSeq}, {purged, {Results}}]}); - -db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) -> +db_req(#httpd{path_parts = [_, <<"_purge">>]} = Req, _Db) -> send_method_not_allowed(Req, "POST"); - -db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) -> +db_req(#httpd{method = 'POST', path_parts = [_, <<"_missing_revs">>]} = Req, Db) -> couch_httpd:validate_ctype(Req, "application/json"), {JsonDocIdRevs} = couch_httpd:json_body_obj(Req), - JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs], + JsonDocIdRevs2 = [ + 
{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} + || {Id, RevStrs} <- JsonDocIdRevs + ], {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2), Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results], - send_json(Req, {[ - {missing_revs, {Results2}} - ]}); - -db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) -> + send_json( + Req, + {[ + {missing_revs, {Results2}} + ]} + ); +db_req(#httpd{path_parts = [_, <<"_missing_revs">>]} = Req, _Db) -> send_method_not_allowed(Req, "POST"); - -db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) -> +db_req(#httpd{method = 'POST', path_parts = [_, <<"_revs_diff">>]} = Req, Db) -> couch_httpd:validate_ctype(Req, "application/json"), {JsonDocIdRevs} = couch_httpd:json_body_obj(Req), JsonDocIdRevs2 = [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs], {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2), Results2 = - lists:map(fun({Id, MissingRevs, PossibleAncestors}) -> - {Id, - {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++ - if PossibleAncestors == [] -> - []; - true -> - [{possible_ancestors, - couch_doc:revs_to_strs(PossibleAncestors)}] - end}} - end, Results), + lists:map( + fun({Id, MissingRevs, PossibleAncestors}) -> + {Id, { + [{missing, couch_doc:revs_to_strs(MissingRevs)}] ++ + if + PossibleAncestors == [] -> + []; + true -> + [{possible_ancestors, couch_doc:revs_to_strs(PossibleAncestors)}] + end + }} + end, + Results + ), send_json(Req, {Results2}); - -db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) -> +db_req(#httpd{path_parts = [_, <<"_revs_diff">>]} = Req, _Db) -> send_method_not_allowed(Req, "POST"); - -db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) -> +db_req(#httpd{method = 'PUT', path_parts = [_, <<"_security">>]} = Req, Db) -> SecObj = couch_httpd:json_body(Req), ok = couch_db:set_security(Db, SecObj), send_json(Req, {[{<<"ok">>, true}]}); - -db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) -> +db_req(#httpd{method = 'GET', path_parts = [_, <<"_security">>]} = Req, Db) -> send_json(Req, couch_db:get_security(Db)); - -db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) -> +db_req(#httpd{path_parts = [_, <<"_security">>]} = Req, _Db) -> send_method_not_allowed(Req, "PUT,GET"); - -db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req, - Db) -> +db_req( + #httpd{method = 'PUT', path_parts = [_, <<"_revs_limit">>]} = Req, + Db +) -> Limit = couch_httpd:json_body(Req), - case is_integer(Limit) of - true -> - ok = couch_db:set_revs_limit(Db, Limit), - send_json(Req, {[{<<"ok">>, true}]}); - false -> - throw({bad_request, <<"Rev limit has to be an integer">>}) - end; - -db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) -> + case is_integer(Limit) of + true -> + ok = couch_db:set_revs_limit(Db, Limit), + send_json(Req, {[{<<"ok">>, true}]}); + false -> + throw({bad_request, <<"Rev limit has to be an integer">>}) + end; +db_req(#httpd{method = 'GET', path_parts = [_, <<"_revs_limit">>]} = Req, Db) -> send_json(Req, couch_db:get_revs_limit(Db)); - -db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) -> +db_req(#httpd{path_parts = [_, <<"_revs_limit">>]} = Req, _Db) -> send_method_not_allowed(Req, "PUT,GET"); - % Special case to enable using an unencoded slash in the URL of design docs, % as slashes in document IDs must otherwise be URL encoded. 
-db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) -> +db_req( + #httpd{ + method = 'GET', mochi_req = MochiReq, path_parts = [DbName, <<"_design/", _/binary>> | _] + } = Req, + _Db +) -> PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/", - [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F", - [{return, list}]), - couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++ - mochiweb_util:join(PathTail, "_design%2F")); - -db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) -> - db_doc_req(Req, Db, <<"_design/",Name/binary>>); - -db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) -> - db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts); - - + [_ | PathTail] = re:split( + MochiReq:get(raw_path), + "_design%2F", + [{return, list}] + ), + couch_httpd:send_redirect( + Req, + PathFront ++ "_design/" ++ + mochiweb_util:join(PathTail, "_design%2F") + ); +db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name]} = Req, Db) -> + db_doc_req(Req, Db, <<"_design/", Name/binary>>); +db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name | FileNameParts]} = Req, Db) -> + db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts); % Special case to allow for accessing local documents without %2F % encoding the docid. Throws out requests that don't have the second % path part or that specify an attachment name. -db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) -> +db_req(#httpd{path_parts = [_DbName, <<"_local">>]}, _Db) -> throw({bad_request, <<"Invalid _local document id.">>}); - -db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) -> +db_req(#httpd{path_parts = [_DbName, <<"_local/">>]}, _Db) -> throw({bad_request, <<"Invalid _local document id.">>}); - -db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) -> +db_req(#httpd{path_parts = [_DbName, <<"_local">>, Name]} = Req, Db) -> db_doc_req(Req, Db, <<"_local/", Name/binary>>); - -db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) -> +db_req(#httpd{path_parts = [_DbName, <<"_local">> | _Rest]}, _Db) -> throw({bad_request, <<"_local documents do not accept attachments.">>}); - -db_req(#httpd{path_parts=[_, DocId]}=Req, Db) -> +db_req(#httpd{path_parts = [_, DocId]} = Req, Db) -> db_doc_req(Req, Db, DocId); - -db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) -> +db_req(#httpd{path_parts = [_, DocId | FileNameParts]} = Req, Db) -> db_attachment_req(Req, Db, DocId, FileNameParts). -db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) -> +db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) -> % check for the existence of the doc to handle the 404 case. 
couch_doc_open(Db, DocId, nil, []), case couch_httpd:qs_value(Req, "rev") of - undefined -> - JsonObj = {[{<<"_deleted">>,true}]}, - Doc = couch_doc_from_req(Req, Db, DocId, JsonObj), - update_doc(Req, Db, DocId, Doc); - Rev -> - JsonObj = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}, - Doc = couch_doc_from_req(Req, Db, DocId, JsonObj), - update_doc(Req, Db, DocId, Doc) + undefined -> + JsonObj = {[{<<"_deleted">>, true}]}, + Doc = couch_doc_from_req(Req, Db, DocId, JsonObj), + update_doc(Req, Db, DocId, Doc); + Rev -> + JsonObj = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]}, + Doc = couch_doc_from_req(Req, Db, DocId, JsonObj), + update_doc(Req, Db, DocId, Doc) end; - db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) -> #doc_query_args{ rev = Rev, @@ -508,206 +567,243 @@ db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) -> options = Options1, atts_since = AttsSince } = parse_doc_query(Req), - Options = case AttsSince of - nil -> - Options1; - RevList when is_list(RevList) -> - [{atts_since, RevList}, attachments | Options1] - end, + Options = + case AttsSince of + nil -> + Options1; + RevList when is_list(RevList) -> + [{atts_since, RevList}, attachments | Options1] + end, case Revs of - [] -> - Doc = couch_doc_open(Db, DocId, Rev, Options), - send_doc(Req, Doc, Options); - _ -> - {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options), - case MochiReq:accepts_content_type("multipart/mixed") of - false -> - {ok, Resp} = start_json_response(Req, 200), - send_chunk(Resp, "["), - % We loop through the docs. The first time through the separator - % is whitespace, then a comma on subsequent iterations. - lists:foldl( - fun(Result, AccSeparator) -> - case Result of - {ok, Doc} -> - JsonDoc = couch_doc:to_json_obj(Doc, Options), - Json = ?JSON_ENCODE({[{ok, JsonDoc}]}), - send_chunk(Resp, AccSeparator ++ Json); - {{not_found, missing}, RevId} -> - RevStr = couch_doc:rev_to_str(RevId), - Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}), - send_chunk(Resp, AccSeparator ++ Json) - end, - "," % AccSeparator now has a comma - end, - "", Results), - send_chunk(Resp, "]"), - end_json_response(Resp); - true -> - send_docs_multipart(Req, Results, Options) - end + [] -> + Doc = couch_doc_open(Db, DocId, Rev, Options), + send_doc(Req, Doc, Options); + _ -> + {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options), + case MochiReq:accepts_content_type("multipart/mixed") of + false -> + {ok, Resp} = start_json_response(Req, 200), + send_chunk(Resp, "["), + % We loop through the docs. The first time through the separator + % is whitespace, then a comma on subsequent iterations. 
+ lists:foldl( + fun(Result, AccSeparator) -> + case Result of + {ok, Doc} -> + JsonDoc = couch_doc:to_json_obj(Doc, Options), + Json = ?JSON_ENCODE({[{ok, JsonDoc}]}), + send_chunk(Resp, AccSeparator ++ Json); + {{not_found, missing}, RevId} -> + RevStr = couch_doc:rev_to_str(RevId), + Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}), + send_chunk(Resp, AccSeparator ++ Json) + end, + % AccSeparator now has a comma + "," + end, + "", + Results + ), + send_chunk(Resp, "]"), + end_json_response(Resp); + true -> + send_docs_multipart(Req, Results, Options) + end end; - - -db_doc_req(#httpd{method='POST'}=Req, Db, DocId) -> +db_doc_req(#httpd{method = 'POST'} = Req, Db, DocId) -> couch_httpd:validate_referer(Req), couch_db:validate_docid(Db, DocId), couch_httpd:validate_ctype(Req, "multipart/form-data"), Form = couch_httpd:parse_form(Req), case couch_util:get_value("_doc", Form) of - undefined -> - Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)), - {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []); - Json -> - Doc = couch_doc_from_req(Req, Db, DocId, ?JSON_DECODE(Json)) + undefined -> + Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)), + {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []); + Json -> + Doc = couch_doc_from_req(Req, Db, DocId, ?JSON_DECODE(Json)) end, UpdatedAtts = [ couch_att:new([ {name, validate_attachment_name(Name)}, {type, list_to_binary(ContentType)}, {data, Content} - ]) || - {Name, {ContentType, _}, Content} <- - proplists:get_all_values("_attachments", Form) + ]) + || {Name, {ContentType, _}, Content} <- + proplists:get_all_values("_attachments", Form) ], - #doc{atts=OldAtts} = Doc, + #doc{atts = OldAtts} = Doc, OldAtts2 = lists:flatmap( fun(Att) -> OldName = couch_att:fetch(name, Att), case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of - [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it - _ -> [] % the attachment was in the UpdatedAtts, drop it + % the attachment wasn't in the UpdatedAtts, return it + [] -> [Att]; + % the attachment was in the UpdatedAtts, drop it + _ -> [] end - end, OldAtts), + end, + OldAtts + ), NewDoc = Doc#doc{ atts = UpdatedAtts ++ OldAtts2 }, update_doc(Req, Db, DocId, NewDoc); - -db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) -> +db_doc_req(#httpd{method = 'PUT'} = Req, Db, DocId) -> couch_db:validate_docid(Db, DocId), case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of - ("multipart/related;" ++ _) = ContentType -> - couch_httpd:check_max_request_length(Req), - {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream( - ContentType, fun() -> receive_request_data(Req) end), - Doc = couch_doc_from_req(Req, Db, DocId, Doc0), - try - Result = update_doc(Req, Db, DocId, Doc), - WaitFun(), - Result - catch throw:Err -> - % Document rejected by a validate_doc_update function. - couch_httpd_multipart:abort_multipart_stream(Parser), - throw(Err) - end; - _Else -> - Body = couch_httpd:json_body(Req), - Doc = couch_doc_from_req(Req, Db, DocId, Body), - update_doc(Req, Db, DocId, Doc) + ("multipart/related;" ++ _) = ContentType -> + couch_httpd:check_max_request_length(Req), + {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream( + ContentType, fun() -> receive_request_data(Req) end + ), + Doc = couch_doc_from_req(Req, Db, DocId, Doc0), + try + Result = update_doc(Req, Db, DocId, Doc), + WaitFun(), + Result + catch + throw:Err -> + % Document rejected by a validate_doc_update function. 
+ couch_httpd_multipart:abort_multipart_stream(Parser), + throw(Err) + end; + _Else -> + Body = couch_httpd:json_body(Req), + Doc = couch_doc_from_req(Req, Db, DocId, Body), + update_doc(Req, Db, DocId, Doc) end; - -db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) -> +db_doc_req(#httpd{method = 'COPY'} = Req, Db, SourceDocId) -> SourceRev = - case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of - missing_rev -> nil; - Rev -> Rev - end, + case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of + missing_rev -> nil; + Rev -> Rev + end, {TargetDocId0, TargetRevs} = parse_copy_destination_header(Req), TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)), % open old doc Doc = couch_doc_open(Db, SourceDocId, SourceRev, []), % save new doc - update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs}); - + update_doc(Req, Db, TargetDocId, Doc#doc{id = TargetDocId, revs = TargetRevs}); db_doc_req(Req, _Db, _DocId) -> send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY"). - send_doc(Req, Doc, Options) -> case Doc#doc.meta of - [] -> - DiskEtag = couch_httpd:doc_etag(Doc), - % output etag only when we have no meta - couch_httpd:etag_respond(Req, DiskEtag, fun() -> - send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options) - end); - _ -> - send_doc_efficiently(Req, Doc, [], Options) + [] -> + DiskEtag = couch_httpd:doc_etag(Doc), + % output etag only when we have no meta + couch_httpd:etag_respond(Req, DiskEtag, fun() -> + send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options) + end); + _ -> + send_doc_efficiently(Req, Doc, [], Options) end. - -send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) -> - send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)); -send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req, - #doc{atts = Atts} = Doc, Headers, Options) -> +send_doc_efficiently(Req, #doc{atts = []} = Doc, Headers, Options) -> + send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)); +send_doc_efficiently( + #httpd{mochi_req = MochiReq} = Req, + #doc{atts = Atts} = Doc, + Headers, + Options +) -> case lists:member(attachments, Options) of - true -> - case MochiReq:accepts_content_type("multipart/related") of - false -> - send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)); true -> - Boundary = couch_uuids:random(), - JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, - [attachments, follows, att_encoding_info | Options])), - {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream( - Boundary,JsonBytes, Atts, true), - CType = {"Content-Type", ?b2l(ContentType)}, - {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len), - couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts, - fun(Data) -> couch_httpd:send(Resp, Data) end, true) - end; - false -> - send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)) + case MochiReq:accepts_content_type("multipart/related") of + false -> + send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)); + true -> + Boundary = couch_uuids:random(), + JsonBytes = ?JSON_ENCODE( + couch_doc:to_json_obj( + Doc, + [attachments, follows, att_encoding_info | Options] + ) + ), + {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream( + Boundary, JsonBytes, Atts, true + ), + CType = {"Content-Type", ?b2l(ContentType)}, + {ok, Resp} = start_response_length(Req, 200, [CType | Headers], Len), + couch_doc:doc_to_multi_part_stream( + Boundary, + JsonBytes, + Atts, + fun(Data) -> couch_httpd:send(Resp, Data) end, + true 
+ ) + end; + false -> + send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)) end. send_docs_multipart(Req, Results, Options1) -> OuterBoundary = couch_uuids:random(), InnerBoundary = couch_uuids:random(), Options = [attachments, follows, att_encoding_info | Options1], - CType = {"Content-Type", - "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""}, + CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""}, {ok, Resp} = start_chunked_response(Req, 200, [CType]), couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>), lists:foreach( - fun({ok, #doc{atts=Atts}=Doc}) -> - JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)), - {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream( - InnerBoundary, JsonBytes, Atts, true), - couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ", - ContentType/binary, "\r\n\r\n">>), - couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts, - fun(Data) -> couch_httpd:send_chunk(Resp, Data) - end, true), - couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>); - ({{not_found, missing}, RevId}) -> - RevStr = couch_doc:rev_to_str(RevId), - Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}), - couch_httpd:send_chunk(Resp, - [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>, - Json, - <<"\r\n--", OuterBoundary/binary>>]) - end, Results), + fun + ({ok, #doc{atts = Atts} = Doc}) -> + JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)), + {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream( + InnerBoundary, JsonBytes, Atts, true + ), + couch_httpd:send_chunk( + Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">> + ), + couch_doc:doc_to_multi_part_stream( + InnerBoundary, + JsonBytes, + Atts, + fun(Data) -> couch_httpd:send_chunk(Resp, Data) end, + true + ), + couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>); + ({{not_found, missing}, RevId}) -> + RevStr = couch_doc:rev_to_str(RevId), + Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}), + couch_httpd:send_chunk( + Resp, + [ + <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>, + Json, + <<"\r\n--", OuterBoundary/binary>> + ] + ) + end, + Results + ), couch_httpd:send_chunk(Resp, <<"--">>), couch_httpd:last_chunk(Resp). 
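The reformatting of send_docs_multipart/3 above is whitespace-only; the wire format is untouched. As a reference point, here is a minimal sketch (not CouchDB code) of the multipart/mixed framing it emits for a single document without attachments — the fixed boundary and JSON body are stand-ins for couch_uuids:random() and the real encoded doc:

```erlang
%% Sketch only: the multipart/mixed framing produced by send_docs_multipart/3
%% for one attachment-free doc. Boundary and body are illustrative stand-ins.
-module(mp_framing_sketch).
-export([frame/0]).

frame() ->
    Outer = <<"5560f694">>,              %% stands in for couch_uuids:random()
    Json = <<"{\"_id\":\"doc1\"}">>,     %% stands in for ?JSON_ENCODE(...)
    iolist_to_binary([
        <<"--">>, Outer,
        <<"\r\nContent-Type: application/json\r\n\r\n">>,
        Json,
        <<"\r\n--">>, Outer,
        %% the stream is closed by "--" after the final boundary
        <<"--">>
    ]).
```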
send_ranges_multipart(Req, ContentType, Len, Att, Ranges) -> Boundary = couch_uuids:random(), - CType = {"Content-Type", - "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""}, + CType = {"Content-Type", "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""}, {ok, Resp} = start_chunked_response(Req, 206, [CType]), couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>), - lists:foreach(fun({From, To}) -> - ContentRange = ?l2b(make_content_range(From, To, Len)), - couch_httpd:send_chunk(Resp, - <<"\r\nContent-Type: ", ContentType/binary, "\r\n", - "Content-Range: ", ContentRange/binary, "\r\n", - "\r\n">>), - couch_att:range_foldl(Att, From, To + 1, - fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}), - couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>) - end, Ranges), + lists:foreach( + fun({From, To}) -> + ContentRange = ?l2b(make_content_range(From, To, Len)), + couch_httpd:send_chunk( + Resp, + <<"\r\nContent-Type: ", ContentType/binary, "\r\n", "Content-Range: ", + ContentRange/binary, "\r\n", "\r\n">> + ), + couch_att:range_foldl( + Att, + From, + To + 1, + fun(Seg, _) -> send_chunk(Resp, Seg) end, + {ok, Resp} + ), + couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>) + end, + Ranges + ), couch_httpd:send_chunk(Resp, <<"--">>), couch_httpd:last_chunk(Resp), {ok, Resp}. @@ -726,11 +822,15 @@ make_content_range(From, To, Len) -> io_lib:format("bytes ~B-~B/~B", [From, To, Len]). update_doc_result_to_json({{Id, Rev}, Error}) -> - {_Code, Err, Msg} = couch_httpd:error_info(Error), - {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)}, - {error, Err}, {reason, Msg}]}. - -update_doc_result_to_json(#doc{id=DocId}, Result) -> + {_Code, Err, Msg} = couch_httpd:error_info(Error), + {[ + {id, Id}, + {rev, couch_doc:rev_to_str(Rev)}, + {error, Err}, + {reason, Msg} + ]}. + +update_doc_result_to_json(#doc{id = DocId}, Result) -> update_doc_result_to_json(DocId, Result); update_doc_result_to_json(DocId, {ok, NewRev}) -> {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]}; @@ -738,10 +838,11 @@ update_doc_result_to_json(DocId, Error) -> {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error), {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}. - -update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) -> +update_doc(Req, Db, DocId, #doc{deleted = false} = Doc) -> DbName = couch_db:name(Db), - Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)), + Loc = absolute_uri( + Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId) + ), update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]); update_doc(Req, Db, DocId, Doc) -> update_doc(Req, Db, DocId, Doc, []). @@ -752,70 +853,85 @@ update_doc(Req, Db, DocId, Doc, Headers) -> } = parse_doc_query(Req), update_doc(Req, Db, DocId, Doc, Headers, UpdateType). 
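make_content_range/3 in the hunk above formats the inclusive byte range against the entity length, per RFC 7233. A quick shell check with illustrative numbers:

```erlang
1> lists:flatten(io_lib:format("bytes ~B-~B/~B", [0, 499, 1234])).
"bytes 0-499/1234"
```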
-update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) -> +update_doc(Req, Db, DocId, #doc{deleted = Deleted} = Doc, Headers, UpdateType) -> case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of - "true" -> - Options = [full_commit]; - "false" -> - Options = [delay_commit]; - _ -> - Options = [] + "true" -> + Options = [full_commit]; + "false" -> + Options = [delay_commit]; + _ -> + Options = [] end, case couch_httpd:qs_value(Req, "batch") of - "ok" -> - % async batching - spawn(fun() -> - case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of - {ok, _} -> ok; - Error -> - couch_log:info("Batch doc error (~s): ~p",[DocId, Error]) + "ok" -> + % async batching + spawn(fun() -> + case catch (couch_db:update_doc(Db, Doc, Options, UpdateType)) of + {ok, _} -> ok; + Error -> couch_log:info("Batch doc error (~s): ~p", [DocId, Error]) end end), - send_json(Req, 202, Headers, {[ - {ok, true}, - {id, DocId} - ]}); - _Normal -> - % normal - {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType), - NewRevStr = couch_doc:rev_to_str(NewRev), - ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers, - send_json(Req, - if Deleted orelse Req#httpd.method == 'DELETE' -> 200; - true -> 201 end, - ResponseHeaders, {[ - {ok, true}, - {id, DocId}, - {rev, NewRevStr}]}) + send_json( + Req, + 202, + Headers, + {[ + {ok, true}, + {id, DocId} + ]} + ); + _Normal -> + % normal + {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType), + NewRevStr = couch_doc:rev_to_str(NewRev), + ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers, + send_json( + Req, + if + Deleted orelse Req#httpd.method == 'DELETE' -> 200; + true -> 201 + end, + ResponseHeaders, + {[ + {ok, true}, + {id, DocId}, + {rev, NewRevStr} + ]} + ) end. -couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs}=Doc) -> +couch_doc_from_req(Req, _Db, DocId, #doc{revs = Revs} = Doc) -> validate_attachment_names(Doc), - Rev = case couch_httpd:qs_value(Req, "rev") of - undefined -> - undefined; - QSRev -> - couch_doc:parse_rev(QSRev) - end, + Rev = + case couch_httpd:qs_value(Req, "rev") of + undefined -> + undefined; + QSRev -> + couch_doc:parse_rev(QSRev) + end, Revs2 = - case Revs of - {Start, [RevId|_]} -> - if Rev /= undefined andalso Rev /= {Start, RevId} -> - throw({bad_request, "Document rev from request body and query " - "string have different values"}); - true -> - case extract_header_rev(Req, {Start, RevId}) of - missing_rev -> {0, []}; - _ -> Revs - end - end; - _ -> - case extract_header_rev(Req, Rev) of - missing_rev -> {0, []}; - {Pos, RevId2} -> {Pos, [RevId2]} - end - end, - Doc#doc{id=DocId, revs=Revs2}; + case Revs of + {Start, [RevId | _]} -> + if + Rev /= undefined andalso Rev /= {Start, RevId} -> + throw( + {bad_request, + "Document rev from request body and query " + "string have different values"} + ); + true -> + case extract_header_rev(Req, {Start, RevId}) of + missing_rev -> {0, []}; + _ -> Revs + end + end; + _ -> + case extract_header_rev(Req, Rev) of + missing_rev -> {0, []}; + {Pos, RevId2} -> {Pos, [RevId2]} + end + end, + Doc#doc{id = DocId, revs = Revs2}; couch_doc_from_req(Req, Db, DocId, Json) -> Doc = couch_db:doc_from_json_obj_validate(Db, Json), couch_doc_from_req(Req, Db, DocId, Doc). 
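couch_doc_from_req/4, reindented above, rejects requests where the revision in the body and the ?rev= query parameter disagree. The rule, restated as a small pure function (names are ours, not CouchDB's):

```erlang
%% Not CouchDB code: the consistency rule from couch_doc_from_req/4.
%% BodyRevs is shaped like #doc.revs, i.e. {Start, [RevId | _]};
%% QsRev is 'undefined' or a parsed {Pos, RevId}.
-module(rev_rule_sketch).
-export([check/2]).

check({Start, [RevId | _]}, QsRev) when
    QsRev =/= undefined, QsRev =/= {Start, RevId}
->
    {error, body_and_query_rev_differ};
check(_BodyRevs, _QsRev) ->
    ok.
```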
@@ -826,233 +942,283 @@ couch_doc_from_req(Req, Db, DocId, Json) -> couch_doc_open(Db, DocId, Rev, Options) -> case Rev of - nil -> % open most recent rev - case couch_db:open_doc(Db, DocId, Options) of - {ok, Doc} -> - Doc; - Error -> - throw(Error) - end; - _ -> % open a specific rev (deletions come back as stubs) - case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of - {ok, [{ok, Doc}]} -> - Doc; - {ok, [{{not_found, missing}, Rev}]} -> - throw(not_found); - {ok, [Else]} -> - throw(Else) - end - end. + % open most recent rev + nil -> + case couch_db:open_doc(Db, DocId, Options) of + {ok, Doc} -> + Doc; + Error -> + throw(Error) + end; + % open a specific rev (deletions come back as stubs) + _ -> + case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of + {ok, [{ok, Doc}]} -> + Doc; + {ok, [{{not_found, missing}, Rev}]} -> + throw(not_found); + {ok, [Else]} -> + throw(Else) + end + end. % Attachment request handlers -db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) -> - FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")), +db_attachment_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId, FileNameParts) -> + FileName = list_to_binary( + mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts), "/") + ), #doc_query_args{ - rev=Rev, - options=Options + rev = Rev, + options = Options } = parse_doc_query(Req), #doc{ - atts=Atts + atts = Atts } = Doc = couch_doc_open(Db, DocId, Rev, Options), case [A || A <- Atts, couch_att:fetch(name, A) == FileName] of - [] -> - throw({not_found, "Document is missing attachment"}); - [Att] -> - [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch([type, encoding, disk_len, att_len, md5], Att), - Etag = case Md5 of - <<>> -> couch_httpd:doc_etag(Doc); - _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\"" - end, - ReqAcceptsAttEnc = lists:member( - atom_to_list(Enc), - couch_httpd:accepted_encodings(Req) - ), - Len = case {Enc, ReqAcceptsAttEnc} of - {identity, _} -> - % stored and served in identity form - DiskLen; - {_, false} when DiskLen =/= AttLen -> - % Stored encoded, but client doesn't accept the encoding we used, - % so we need to decode on the fly. DiskLen is the identity length - % of the attachment. - DiskLen; - {_, true} -> - % Stored and served encoded. AttLen is the encoded length. - AttLen; - _ -> - % We received an encoded attachment and stored it as such, so we - % don't know the identity length. The client doesn't accept the - % encoding, and since we cannot serve a correct Content-Length - % header we'll fall back to a chunked response. 
- undefined - end, - Headers = [ - {"ETag", Etag}, - {"Cache-Control", "must-revalidate"}, - {"Content-Type", binary_to_list(Type)} - ] ++ case ReqAcceptsAttEnc of - true when Enc =/= identity -> - % RFC 2616 says that the 'identify' encoding should not be used in - % the Content-Encoding header - [{"Content-Encoding", atom_to_list(Enc)}]; - _ -> - [] - end ++ case Enc of - identity -> - [{"Accept-Ranges", "bytes"}]; - _ -> - [{"Accept-Ranges", "none"}] - end, - AttFun = case ReqAcceptsAttEnc of - false -> - fun couch_att:foldl_decode/3; - true -> - fun couch_att:foldl/3 - end, - couch_httpd:etag_respond( - Req, - Etag, - fun() -> - case Len of - undefined -> - {ok, Resp} = start_chunked_response(Req, 200, Headers), - AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}), - last_chunk(Resp); - _ -> - Ranges = parse_ranges(MochiReq:get(range), Len), - case {Enc, Ranges} of - {identity, [{From, To}]} -> - Headers1 = [{"Content-Range", make_content_range(From, To, Len)}] - ++ Headers, - {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1), - couch_att:range_foldl(Att, From, To + 1, - fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp}); - {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 -> - send_ranges_multipart(Req, Type, Len, Att, Ranges); + [] -> + throw({not_found, "Document is missing attachment"}); + [Att] -> + [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch( + [type, encoding, disk_len, att_len, md5], Att + ), + Etag = + case Md5 of + <<>> -> couch_httpd:doc_etag(Doc); + _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\"" + end, + ReqAcceptsAttEnc = lists:member( + atom_to_list(Enc), + couch_httpd:accepted_encodings(Req) + ), + Len = + case {Enc, ReqAcceptsAttEnc} of + {identity, _} -> + % stored and served in identity form + DiskLen; + {_, false} when DiskLen =/= AttLen -> + % Stored encoded, but client doesn't accept the encoding we used, + % so we need to decode on the fly. DiskLen is the identity length + % of the attachment. + DiskLen; + {_, true} -> + % Stored and served encoded. AttLen is the encoded length. + AttLen; + _ -> + % We received an encoded attachment and stored it as such, so we + % don't know the identity length. The client doesn't accept the + % encoding, and since we cannot serve a correct Content-Length + % header we'll fall back to a chunked response. 
+ undefined + end, + Headers = + [ + {"ETag", Etag}, + {"Cache-Control", "must-revalidate"}, + {"Content-Type", binary_to_list(Type)} + ] ++ + case ReqAcceptsAttEnc of + true when Enc =/= identity -> + % RFC 2616 says that the 'identify' encoding should not be used in + % the Content-Encoding header + [{"Content-Encoding", atom_to_list(Enc)}]; _ -> - Headers1 = Headers ++ - if Enc =:= identity orelse ReqAcceptsAttEnc =:= true -> - [{"Content-MD5", base64:encode(Md5)}]; - true -> - [] - end, - {ok, Resp} = start_response_length(Req, 200, Headers1, Len), - AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp}) + [] + end ++ + case Enc of + identity -> + [{"Accept-Ranges", "bytes"}]; + _ -> + [{"Accept-Ranges", "none"}] + end, + AttFun = + case ReqAcceptsAttEnc of + false -> + fun couch_att:foldl_decode/3; + true -> + fun couch_att:foldl/3 + end, + couch_httpd:etag_respond( + Req, + Etag, + fun() -> + case Len of + undefined -> + {ok, Resp} = start_chunked_response(Req, 200, Headers), + AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}), + last_chunk(Resp); + _ -> + Ranges = parse_ranges(MochiReq:get(range), Len), + case {Enc, Ranges} of + {identity, [{From, To}]} -> + Headers1 = + [{"Content-Range", make_content_range(From, To, Len)}] ++ + Headers, + {ok, Resp} = start_response_length( + Req, 206, Headers1, To - From + 1 + ), + couch_att:range_foldl( + Att, + From, + To + 1, + fun(Seg, _) -> send(Resp, Seg) end, + {ok, Resp} + ); + {identity, Ranges} when + is_list(Ranges) andalso length(Ranges) < 10 + -> + send_ranges_multipart(Req, Type, Len, Att, Ranges); + _ -> + Headers1 = + Headers ++ + if + Enc =:= identity orelse ReqAcceptsAttEnc =:= true -> + [{"Content-MD5", base64:encode(Md5)}]; + true -> + [] + end, + {ok, Resp} = start_response_length(Req, 200, Headers1, Len), + AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp}) + end end end - end - ) + ) end; - - -db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) - when (Method == 'PUT') or (Method == 'DELETE') -> +db_attachment_req( + #httpd{method = Method, mochi_req = MochiReq} = Req, Db, DocId, FileNameParts +) when + (Method == 'PUT') or (Method == 'DELETE') +-> FileName = validate_attachment_name( - mochiweb_util:join( - lists:map(fun binary_to_list/1, - FileNameParts),"/")), - NewAtt = case Method of - 'DELETE' -> - []; - _ -> - MimeType = case couch_httpd:header_value(Req,"Content-Type") of - % We could throw an error here or guess by the FileName. - % Currently, just giving it a default. - undefined -> <<"application/octet-stream">>; - CType -> list_to_binary(CType) - end, - Data = case couch_httpd:body_length(Req) of - undefined -> - <<"">>; - {unknown_transfer_encoding, Unknown} -> - exit({unknown_transfer_encoding, Unknown}); - chunked -> - fun(MaxChunkSize, ChunkFun, InitState) -> - couch_httpd:recv_chunked( - Req, MaxChunkSize, ChunkFun, InitState - ) - end; - 0 -> - <<"">>; - Length when is_integer(Length) -> - Expect = case couch_httpd:header_value(Req, "expect") of + mochiweb_util:join( + lists:map( + fun binary_to_list/1, + FileNameParts + ), + "/" + ) + ), + NewAtt = + case Method of + 'DELETE' -> + []; + _ -> + MimeType = + case couch_httpd:header_value(Req, "Content-Type") of + % We could throw an error here or guess by the FileName. + % Currently, just giving it a default. 
+ undefined -> <<"application/octet-stream">>; + CType -> list_to_binary(CType) + end, + Data = + case couch_httpd:body_length(Req) of undefined -> - undefined; - Value when is_list(Value) -> - string:to_lower(Value) + <<"">>; + {unknown_transfer_encoding, Unknown} -> + exit({unknown_transfer_encoding, Unknown}); + chunked -> + fun(MaxChunkSize, ChunkFun, InitState) -> + couch_httpd:recv_chunked( + Req, MaxChunkSize, ChunkFun, InitState + ) + end; + 0 -> + <<"">>; + Length when is_integer(Length) -> + Expect = + case couch_httpd:header_value(Req, "expect") of + undefined -> + undefined; + Value when is_list(Value) -> + string:to_lower(Value) + end, + case Expect of + "100-continue" -> + MochiReq:start_raw_response({100, gb_trees:empty()}); + _Else -> + ok + end, + fun() -> couch_httpd:recv(Req, 0) end; + Length -> + exit({length_not_integer, Length}) end, - case Expect of - "100-continue" -> - MochiReq:start_raw_response({100, gb_trees:empty()}); - _Else -> - ok + AttLen = + case couch_httpd:header_value(Req, "Content-Length") of + undefined -> undefined; + Len -> list_to_integer(Len) end, - fun() -> couch_httpd:recv(Req, 0) end; - Length -> - exit({length_not_integer, Length}) - end, - AttLen = case couch_httpd:header_value(Req,"Content-Length") of - undefined -> undefined; - Len -> list_to_integer(Len) - end, - ContentEnc = string:to_lower(string:strip( - couch_httpd:header_value(Req,"Content-Encoding","identity") - )), - Encoding = case ContentEnc of - "identity" -> - identity; - "gzip" -> - gzip; - _ -> - throw({ - bad_ctype, - "Only gzip and identity content-encodings are supported" - }) - end, - [couch_att:new([ - {name, FileName}, - {type, MimeType}, - {data, Data}, - {att_len, AttLen}, - {md5, get_md5_header(Req)}, - {encoding, Encoding} - ])] - end, + ContentEnc = string:to_lower( + string:strip( + couch_httpd:header_value(Req, "Content-Encoding", "identity") + ) + ), + Encoding = + case ContentEnc of + "identity" -> + identity; + "gzip" -> + gzip; + _ -> + throw({ + bad_ctype, + "Only gzip and identity content-encodings are supported" + }) + end, + [ + couch_att:new([ + {name, FileName}, + {type, MimeType}, + {data, Data}, + {att_len, AttLen}, + {md5, get_md5_header(Req)}, + {encoding, Encoding} + ]) + ] + end, - Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of - missing_rev -> % make the new doc - if Method =/= 'DELETE' -> ok; true -> - % check for the existence of the doc to handle the 404 case. - couch_doc_open(Db, DocId, nil, []) - end, - couch_db:validate_docid(Db, DocId), - #doc{id=DocId}; - Rev -> - case couch_db:open_doc_revs(Db, DocId, [Rev], []) of - {ok, [{ok, Doc0}]} -> Doc0; - {ok, [{{not_found, missing}, Rev}]} -> throw(conflict); - {ok, [Error]} -> throw(Error) - end - end, + Doc = + case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of + % make the new doc + missing_rev -> + if + Method =/= 'DELETE' -> + ok; + true -> + % check for the existence of the doc to handle the 404 case. 
+ couch_doc_open(Db, DocId, nil, []) + end, + couch_db:validate_docid(Db, DocId), + #doc{id = DocId}; + Rev -> + case couch_db:open_doc_revs(Db, DocId, [Rev], []) of + {ok, [{ok, Doc0}]} -> Doc0; + {ok, [{{not_found, missing}, Rev}]} -> throw(conflict); + {ok, [Error]} -> throw(Error) + end + end, - #doc{atts=Atts} = Doc, + #doc{atts = Atts} = Doc, DocEdited = Doc#doc{ atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName] }, - Headers = case Method of - 'DELETE' -> - []; - _ -> - [{"Location", absolute_uri(Req, "/" ++ - couch_util:url_encode(couch_db:name(Db)) ++ "/" ++ - couch_util:url_encode(DocId) ++ "/" ++ - couch_util:url_encode(FileName) - )}] - end, + Headers = + case Method of + 'DELETE' -> + []; + _ -> + [ + {"Location", + absolute_uri( + Req, + "/" ++ + couch_util:url_encode(couch_db:name(Db)) ++ "/" ++ + couch_util:url_encode(DocId) ++ "/" ++ + couch_util:url_encode(FileName) + )} + ] + end, update_doc(Req, Db, DocId, DocEdited, Headers); - db_attachment_req(Req, _Db, _DocId, _FileNameParts) -> send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT"). @@ -1065,17 +1231,19 @@ parse_ranges(Ranges, Len) -> parse_ranges([], _Len, Acc) -> lists:reverse(Acc); -parse_ranges([{0, none}|_], _Len, _Acc) -> +parse_ranges([{0, none} | _], _Len, _Acc) -> undefined; -parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From -> +parse_ranges([{From, To} | _], _Len, _Acc) when + is_integer(From) andalso is_integer(To) andalso To < From +-> throw(requested_range_not_satisfiable); -parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len -> - parse_ranges([{From, Len-1}] ++ Rest, Len, Acc); -parse_ranges([{none, To}|Rest], Len, Acc) -> +parse_ranges([{From, To} | Rest], Len, Acc) when is_integer(To) andalso To >= Len -> + parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc); +parse_ranges([{none, To} | Rest], Len, Acc) -> parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc); -parse_ranges([{From, none}|Rest], Len, Acc) -> +parse_ranges([{From, none} | Rest], Len, Acc) -> parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc); -parse_ranges([{From,To}|Rest], Len, Acc) -> +parse_ranges([{From, To} | Rest], Len, Acc) -> parse_ranges(Rest, Len, [{From, To}] ++ Acc). get_md5_header(Req) -> @@ -1099,99 +1267,111 @@ get_md5_header(Req) -> end. 
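The parse_ranges/2 clauses above only gained guard line breaks; behaviourally they resolve mochiweb's raw Range pairs against the entity length. The happy path in isolation — a sketch that omits the two failure clauses ({0, none} yielding undefined, and To < From throwing requested_range_not_satisfiable):

```erlang
%% Sketch of the happy-path resolution rules in parse_ranges/2.
%% mochiweb parses "Range: bytes=..." into {From, To} pairs in which
%% either side may be the atom 'none'.
-module(range_sketch).
-export([resolve/2]).

resolve({none, To}, Len) -> {Len - To, Len - 1};            %% "bytes=-500": last To bytes
resolve({From, none}, Len) -> {From, Len - 1};              %% "bytes=500-": to the end
resolve({From, To}, Len) when To >= Len -> {From, Len - 1}; %% clamp past-the-end To
resolve({From, To}, _Len) -> {From, To}.                    %% already in bounds
```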
parse_doc_query(Req) -> - lists:foldl(fun({Key,Value}, Args) -> - case {Key, Value} of - {"attachments", "true"} -> - Options = [attachments | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"meta", "true"} -> - Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"revs", "true"} -> - Options = [revs | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"local_seq", "true"} -> - Options = [local_seq | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"revs_info", "true"} -> - Options = [revs_info | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"conflicts", "true"} -> - Options = [conflicts | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"deleted_conflicts", "true"} -> - Options = [deleted_conflicts | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"rev", Rev} -> - Args#doc_query_args{rev=couch_doc:parse_rev(Rev)}; - {"open_revs", "all"} -> - Args#doc_query_args{open_revs=all}; - {"open_revs", RevsJsonStr} -> - JsonArray = ?JSON_DECODE(RevsJsonStr), - Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)}; - {"latest", "true"} -> - Options = [latest | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - {"atts_since", RevsJsonStr} -> - JsonArray = ?JSON_DECODE(RevsJsonStr), - Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)}; - {"new_edits", "false"} -> - Args#doc_query_args{update_type=replicated_changes}; - {"new_edits", "true"} -> - Args#doc_query_args{update_type=interactive_edit}; - {"att_encoding_info", "true"} -> - Options = [att_encoding_info | Args#doc_query_args.options], - Args#doc_query_args{options=Options}; - _Else -> % unknown key value pair, ignore. - Args - end - end, #doc_query_args{}, couch_httpd:qs(Req)). 
+ lists:foldl( + fun({Key, Value}, Args) -> + case {Key, Value} of + {"attachments", "true"} -> + Options = [attachments | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"meta", "true"} -> + Options = [ + revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options + ], + Args#doc_query_args{options = Options}; + {"revs", "true"} -> + Options = [revs | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"local_seq", "true"} -> + Options = [local_seq | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"revs_info", "true"} -> + Options = [revs_info | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"conflicts", "true"} -> + Options = [conflicts | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"deleted_conflicts", "true"} -> + Options = [deleted_conflicts | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"rev", Rev} -> + Args#doc_query_args{rev = couch_doc:parse_rev(Rev)}; + {"open_revs", "all"} -> + Args#doc_query_args{open_revs = all}; + {"open_revs", RevsJsonStr} -> + JsonArray = ?JSON_DECODE(RevsJsonStr), + Args#doc_query_args{open_revs = couch_doc:parse_revs(JsonArray)}; + {"latest", "true"} -> + Options = [latest | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + {"atts_since", RevsJsonStr} -> + JsonArray = ?JSON_DECODE(RevsJsonStr), + Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)}; + {"new_edits", "false"} -> + Args#doc_query_args{update_type = replicated_changes}; + {"new_edits", "true"} -> + Args#doc_query_args{update_type = interactive_edit}; + {"att_encoding_info", "true"} -> + Options = [att_encoding_info | Args#doc_query_args.options], + Args#doc_query_args{options = Options}; + % unknown key value pair, ignore. + _Else -> + Args + end + end, + #doc_query_args{}, + couch_httpd:qs(Req) + ). parse_changes_query(Req, Db) -> - ChangesArgs = lists:foldl(fun({Key, Value}, Args) -> - case {string:to_lower(Key), Value} of - {"feed", "live"} -> - %% sugar for continuous - Args#changes_args{feed="continuous"}; - {"feed", _} -> - Args#changes_args{feed=Value}; - {"descending", "true"} -> - Args#changes_args{dir=rev}; - {"since", "now"} -> - UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) -> - couch_db:get_update_seq(WDb) - end), - Args#changes_args{since=UpdateSeq}; - {"since", _} -> - Args#changes_args{since=list_to_integer(Value)}; - {"last-event-id", _} -> - Args#changes_args{since=list_to_integer(Value)}; - {"limit", _} -> - Args#changes_args{limit=list_to_integer(Value)}; - {"style", _} -> - Args#changes_args{style=list_to_existing_atom(Value)}; - {"heartbeat", "true"} -> - Args#changes_args{heartbeat=true}; - {"heartbeat", _} -> - Args#changes_args{heartbeat=list_to_integer(Value)}; - {"timeout", _} -> - Args#changes_args{timeout=list_to_integer(Value)}; - {"include_docs", "true"} -> - Args#changes_args{include_docs=true}; - {"attachments", "true"} -> - Opts = Args#changes_args.doc_options, - Args#changes_args{doc_options=[attachments|Opts]}; - {"att_encoding_info", "true"} -> - Opts = Args#changes_args.doc_options, - Args#changes_args{doc_options=[att_encoding_info|Opts]}; - {"conflicts", "true"} -> - Args#changes_args{conflicts=true}; - {"filter", _} -> - Args#changes_args{filter=Value}; - _Else -> % unknown key value pair, ignore. 
- Args - end - end, #changes_args{}, couch_httpd:qs(Req)), + ChangesArgs = lists:foldl( + fun({Key, Value}, Args) -> + case {string:to_lower(Key), Value} of + {"feed", "live"} -> + %% sugar for continuous + Args#changes_args{feed = "continuous"}; + {"feed", _} -> + Args#changes_args{feed = Value}; + {"descending", "true"} -> + Args#changes_args{dir = rev}; + {"since", "now"} -> + UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) -> + couch_db:get_update_seq(WDb) + end), + Args#changes_args{since = UpdateSeq}; + {"since", _} -> + Args#changes_args{since = list_to_integer(Value)}; + {"last-event-id", _} -> + Args#changes_args{since = list_to_integer(Value)}; + {"limit", _} -> + Args#changes_args{limit = list_to_integer(Value)}; + {"style", _} -> + Args#changes_args{style = list_to_existing_atom(Value)}; + {"heartbeat", "true"} -> + Args#changes_args{heartbeat = true}; + {"heartbeat", _} -> + Args#changes_args{heartbeat = list_to_integer(Value)}; + {"timeout", _} -> + Args#changes_args{timeout = list_to_integer(Value)}; + {"include_docs", "true"} -> + Args#changes_args{include_docs = true}; + {"attachments", "true"} -> + Opts = Args#changes_args.doc_options, + Args#changes_args{doc_options = [attachments | Opts]}; + {"att_encoding_info", "true"} -> + Opts = Args#changes_args.doc_options, + Args#changes_args{doc_options = [att_encoding_info | Opts]}; + {"conflicts", "true"} -> + Args#changes_args{conflicts = true}; + {"filter", _} -> + Args#changes_args{filter = Value}; + % unknown key value pair, ignore. + _Else -> + Args + end + end, + #changes_args{}, + couch_httpd:qs(Req) + ), %% if it's an EventSource request with a Last-event-ID header %% that should override the `since` query string, since it's %% probably the browser reconnecting. @@ -1201,60 +1381,62 @@ parse_changes_query(Req, Db) -> undefined -> ChangesArgs; Value -> - ChangesArgs#changes_args{since=list_to_integer(Value)} + ChangesArgs#changes_args{since = list_to_integer(Value)} end; _ -> ChangesArgs end. -extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)-> +extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) -> extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev)); extract_header_rev(Req, ExplicitRev) -> - Etag = case couch_httpd:header_value(Req, "If-Match") of - undefined -> undefined; - Value -> couch_doc:parse_rev(string:strip(Value, both, $")) - end, + Etag = + case couch_httpd:header_value(Req, "If-Match") of + undefined -> undefined; + Value -> couch_doc:parse_rev(string:strip(Value, both, $")) + end, case {ExplicitRev, Etag} of - {undefined, undefined} -> missing_rev; - {_, undefined} -> ExplicitRev; - {undefined, _} -> Etag; - _ when ExplicitRev == Etag -> Etag; - _ -> - throw({bad_request, "Document rev and etag have different values"}) + {undefined, undefined} -> missing_rev; + {_, undefined} -> ExplicitRev; + {undefined, _} -> Etag; + _ when ExplicitRev == Etag -> Etag; + _ -> throw({bad_request, "Document rev and etag have different values"}) end. 
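extract_header_rev/2 above reconciles an explicitly supplied revision with the If-Match etag. Its precedence, as a tiny standalone function (our naming):

```erlang
%% Not CouchDB code: the precedence implemented by extract_header_rev/2
%% once both inputs are parsed; either may be 'undefined'.
-module(rev_precedence_sketch).
-export([pick/2]).

pick(undefined, undefined) -> missing_rev;
pick(ExplicitRev, undefined) -> ExplicitRev;
pick(undefined, Etag) -> Etag;
pick(Rev, Rev) -> Rev;  %% both present and equal
pick(_, _) -> throw({bad_request, "Document rev and etag have different values"}).
```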
- parse_copy_destination_header(Req) -> case couch_httpd:header_value(Req, "Destination") of - undefined -> - throw({bad_request, "Destination header is mandatory for COPY."}); - Destination -> - case re:run(Destination, "^https?://", [{capture, none}]) of - match -> - throw({bad_request, "Destination URL must be relative."}); - nomatch -> - % see if ?rev=revid got appended to the Destination header - case re:run(Destination, "\\?", [{capture, none}]) of - nomatch -> - {list_to_binary(Destination), {0, []}}; - match -> - [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]), - [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]), - {Pos, RevId} = couch_doc:parse_rev(Rev), - {list_to_binary(DocId), {Pos, [RevId]}} + undefined -> + throw({bad_request, "Destination header is mandatory for COPY."}); + Destination -> + case re:run(Destination, "^https?://", [{capture, none}]) of + match -> + throw({bad_request, "Destination URL must be relative."}); + nomatch -> + % see if ?rev=revid got appended to the Destination header + case re:run(Destination, "\\?", [{capture, none}]) of + nomatch -> + {list_to_binary(Destination), {0, []}}; + match -> + [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]), + [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]), + {Pos, RevId} = couch_doc:parse_rev(Rev), + {list_to_binary(DocId), {Pos, [RevId]}} + end end - end end. validate_attachment_names(Doc) -> - lists:foreach(fun(Att) -> - Name = couch_att:fetch(name, Att), - validate_attachment_name(Name) - end, Doc#doc.atts). + lists:foreach( + fun(Att) -> + Name = couch_att:fetch(name, Att), + validate_attachment_name(Name) + end, + Doc#doc.atts + ). validate_attachment_name(Name) when is_list(Name) -> validate_attachment_name(list_to_binary(Name)); -validate_attachment_name(<<"_",_/binary>>) -> +validate_attachment_name(<<"_", _/binary>>) -> throw({bad_request, <<"Attachment name can't start with '_'">>}); validate_attachment_name(Name) -> case couch_util:validate_utf8(Name) of diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl index ea9c1cb84..d9c591875 100644 --- a/src/couch/src/couch_httpd_misc_handlers.erl +++ b/src/couch/src/couch_httpd_misc_handlers.erl @@ -12,87 +12,104 @@ -module(couch_httpd_misc_handlers). --export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2, +-export([ + handle_welcome_req/2, + handle_favicon_req/2, + handle_utils_dir_req/2, handle_all_dbs_req/1, - handle_uuids_req/1,handle_config_req/1, - handle_task_status_req/1, handle_file_req/2]). - + handle_uuids_req/1, + handle_config_req/1, + handle_task_status_req/1, + handle_file_req/2 +]). -include_lib("couch/include/couch_db.hrl"). --import(couch_httpd, - [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2, - start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1, - start_chunked_response/3, send_error/4]). +-import( + couch_httpd, + [ + send_json/2, send_json/3, send_json/4, + send_method_not_allowed/2, + start_json_response/2, + send_chunk/2, + last_chunk/1, + end_json_response/1, + start_chunked_response/3, + send_error/4 + ] +). 
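Back in the couch_httpd_db hunk above, parse_copy_destination_header/1 peels an optional ?rev=... off the COPY Destination header with two plain splits. In the shell, with a hypothetical header value:

```erlang
1> re:split("newdoc?rev=2-abc", "\\?", [{return, list}]).
["newdoc","rev=2-abc"]
2> re:split("rev=2-abc", "=", [{return, list}]).
["rev","2-abc"]
```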
% httpd global handlers -handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) -> - send_json(Req, {[ - {couchdb, WelcomeMessage}, - {uuid, couch_server:get_uuid()}, - {version, list_to_binary(couch_server:get_version())} - ] ++ case config:get("vendor") of - [] -> - []; - Properties -> - [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}] - end +handle_welcome_req(#httpd{method = 'GET'} = Req, WelcomeMessage) -> + send_json(Req, { + [ + {couchdb, WelcomeMessage}, + {uuid, couch_server:get_uuid()}, + {version, list_to_binary(couch_server:get_version())} + ] ++ + case config:get("vendor") of + [] -> + []; + Properties -> + [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}] + end }); handle_welcome_req(Req, _) -> send_method_not_allowed(Req, "GET,HEAD"). -handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) -> - {{Year,Month,Day},Time} = erlang:universaltime(), - OneYearFromNow = {{Year+1,Month,Day},Time}, +handle_favicon_req(#httpd{method = 'GET'} = Req, DocumentRoot) -> + {{Year, Month, Day}, Time} = erlang:universaltime(), + OneYearFromNow = {{Year + 1, Month, Day}, Time}, CachingHeaders = [ %favicon should expire a year from now {"Cache-Control", "public, max-age=31536000"}, {"Expires", couch_util:rfc1123_date(OneYearFromNow)} ], couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders); - handle_favicon_req(Req, _) -> send_method_not_allowed(Req, "GET,HEAD"). -handle_file_req(#httpd{method='GET'}=Req, Document) -> +handle_file_req(#httpd{method = 'GET'} = Req, Document) -> couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document)); - handle_file_req(Req, _) -> send_method_not_allowed(Req, "GET,HEAD"). handle_utils_dir_req(Req, _) -> - send_error(Req, 410, <<"no_node_local_fauxton">>, - ?l2b("The web interface is no longer available on the node-local port.")). - + send_error( + Req, + 410, + <<"no_node_local_fauxton">>, + ?l2b("The web interface is no longer available on the node-local port.") + ). -handle_all_dbs_req(#httpd{method='GET'}=Req) -> +handle_all_dbs_req(#httpd{method = 'GET'} = Req) -> {ok, DbNames} = couch_server:all_databases(), send_json(Req, DbNames); handle_all_dbs_req(Req) -> send_method_not_allowed(Req, "GET,HEAD"). - -handle_task_status_req(#httpd{method='GET'}=Req) -> +handle_task_status_req(#httpd{method = 'GET'} = Req) -> ok = couch_httpd:verify_is_server_admin(Req), % convert the list of prop lists to a list of json objects send_json(Req, [{Props} || Props <- couch_task_status:all()]); handle_task_status_req(Req) -> send_method_not_allowed(Req, "GET,HEAD"). 
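handle_favicon_req/2 above builds its Expires header by bumping the year on the current UTC timestamp. The same arithmetic in the shell — httpd_util:rfc1123_date/1 from inets stands in here for couch_util:rfc1123_date/1 and treats its argument as local time, so the rendered string is illustrative:

```erlang
1> {{Y, Mo, D}, T} = erlang:universaltime().
{{2021,11,20},{6,0,8}}
2> httpd_util:rfc1123_date({{Y + 1, Mo, D}, T}).
"Sun, 20 Nov 2022 06:00:08 GMT"
```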
- -handle_uuids_req(#httpd{method='GET'}=Req) -> - Max = config:get_integer("uuids","max_count", 1000), - Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of - N when N > Max -> - throw({bad_request, <<"count parameter too large">>}); - N when N < 0 -> - throw({bad_request, <<"count must be a positive integer">>}); - N -> N - catch - error:badarg -> - throw({bad_request, <<"count must be a positive integer">>}) - end, +handle_uuids_req(#httpd{method = 'GET'} = Req) -> + Max = config:get_integer("uuids", "max_count", 1000), + Count = + try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of + N when N > Max -> + throw({bad_request, <<"count parameter too large">>}); + N when N < 0 -> + throw({bad_request, <<"count must be a positive integer">>}); + N -> + N + catch + error:badarg -> + throw({bad_request, <<"count must be a positive integer">>}) + end, UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)], Etag = couch_httpd:make_etag(UUIDs), couch_httpd:etag_respond(Req, Etag, fun() -> @@ -109,51 +126,60 @@ handle_uuids_req(#httpd{method='GET'}=Req) -> handle_uuids_req(Req) -> send_method_not_allowed(Req, "GET"). - % Config request handler - % GET /_config/ % GET /_config -handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) -> +handle_config_req(#httpd{method = 'GET', path_parts = [_]} = Req) -> ok = couch_httpd:verify_is_server_admin(Req), - Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) -> - case dict:is_key(Section, Acc) of - true -> - dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc); - false -> - dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc) - end - end, dict:new(), config:all()), - KVs = dict:fold(fun(Section, Values, Acc) -> - [{list_to_binary(Section), {Values}} | Acc] - end, [], Grouped), + Grouped = lists:foldl( + fun({{Section, Key}, Value}, Acc) -> + case dict:is_key(Section, Acc) of + true -> + dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc); + false -> + dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc) + end + end, + dict:new(), + config:all() + ), + KVs = dict:fold( + fun(Section, Values, Acc) -> + [{list_to_binary(Section), {Values}} | Acc] + end, + [], + Grouped + ), send_json(Req, 200, {KVs}); % GET /_config/Section -handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) -> +handle_config_req(#httpd{method = 'GET', path_parts = [_, Section]} = Req) -> ok = couch_httpd:verify_is_server_admin(Req), - KVs = [{list_to_binary(Key), list_to_binary(Value)} - || {Key, Value} <- config:get(Section)], + KVs = [ + {list_to_binary(Key), list_to_binary(Value)} + || {Key, Value} <- config:get(Section) + ], send_json(Req, 200, {KVs}); % GET /_config/Section/Key -handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) -> +handle_config_req(#httpd{method = 'GET', path_parts = [_, Section, Key]} = Req) -> ok = couch_httpd:verify_is_server_admin(Req), case config:get(Section, Key, undefined) of - undefined -> - throw({not_found, unknown_config_value}); - Value -> - send_json(Req, 200, list_to_binary(Value)) + undefined -> + throw({not_found, unknown_config_value}); + Value -> + send_json(Req, 200, list_to_binary(Value)) end; % POST /_config/_reload - Flushes unpersisted config values from RAM -handle_config_req(#httpd{method='POST', path_parts=[_, <<"_reload">>]}=Req) -> +handle_config_req(#httpd{method = 'POST', path_parts = [_, <<"_reload">>]} = Req) -> couch_httpd:validate_ctype(Req, "application/json"), _ = 
couch_httpd:body(Req), ok = couch_httpd:verify_is_server_admin(Req), ok = config:reload(), send_json(Req, 200, {[{ok, true}]}); % PUT or DELETE /_config/Section/Key -handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req) - when (Method == 'PUT') or (Method == 'DELETE') -> +handle_config_req(#httpd{method = Method, path_parts = [_, Section, Key]} = Req) when + (Method == 'PUT') or (Method == 'DELETE') +-> ok = couch_httpd:verify_is_server_admin(Req), couch_util:check_config_blacklist(Section), Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false", @@ -169,19 +195,25 @@ handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req) % variable itself. FallbackWhitelist = [{<<"chttpd">>, <<"config_whitelist">>}], - Whitelist = case couch_util:parse_term(WhitelistValue) of - {ok, Value} when is_list(Value) -> - Value; - {ok, _NonListValue} -> - FallbackWhitelist; - {error, _} -> - [{WhitelistSection, WhitelistKey}] = FallbackWhitelist, - couch_log:error("Only whitelisting ~s/~s due to error" - " parsing: ~p", - [WhitelistSection, WhitelistKey, - WhitelistValue]), - FallbackWhitelist - end, + Whitelist = + case couch_util:parse_term(WhitelistValue) of + {ok, Value} when is_list(Value) -> + Value; + {ok, _NonListValue} -> + FallbackWhitelist; + {error, _} -> + [{WhitelistSection, WhitelistKey}] = FallbackWhitelist, + couch_log:error( + "Only whitelisting ~s/~s due to error" + " parsing: ~p", + [ + WhitelistSection, + WhitelistKey, + WhitelistValue + ] + ), + FallbackWhitelist + end, IsRequestedKeyVal = fun(Element) -> case Element of @@ -207,8 +239,12 @@ handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req) handle_approved_config_req(Req, Persist); _NotWhitelisted -> % Disallow modifying this non-whitelisted variable. - send_error(Req, 400, <<"modification_not_allowed">>, - ?l2b("This config variable is read-only")) + send_error( + Req, + 400, + <<"modification_not_allowed">>, + ?l2b("This config variable is read-only") + ) end end; handle_config_req(Req) -> @@ -218,52 +254,60 @@ handle_config_req(Req) -> % "value" handle_approved_config_req(Req, Persist) -> Query = couch_httpd:qs(Req), - UseRawValue = case lists:keyfind("raw", 1, Query) of - false -> false; % Not specified - {"raw", ""} -> false; % Specified with no value, i.e. "?raw" and "?raw=" - {"raw", "false"} -> false; - {"raw", "true"} -> true; - {"raw", InvalidValue} -> InvalidValue - end, + UseRawValue = + case lists:keyfind("raw", 1, Query) of + % Not specified + false -> false; + % Specified with no value, i.e. "?raw" and "?raw=" + {"raw", ""} -> false; + {"raw", "false"} -> false; + {"raw", "true"} -> true; + {"raw", InvalidValue} -> InvalidValue + end, handle_approved_config_req(Req, Persist, UseRawValue). -handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req, - Persist, UseRawValue) - when UseRawValue =:= false orelse UseRawValue =:= true -> +handle_approved_config_req( + #httpd{method = 'PUT', path_parts = [_, Section, Key]} = Req, + Persist, + UseRawValue +) when + UseRawValue =:= false orelse UseRawValue =:= true +-> RawValue = couch_httpd:json_body(Req), - Value = case UseRawValue of - true -> - % Client requests no change to the provided value. - RawValue; - false -> - % Pre-process the value as necessary. - case Section of - <<"admins">> -> - couch_passwords:hash_admin_password(RawValue); - _ -> - couch_util:trim(RawValue) - end - end, + Value = + case UseRawValue of + true -> + % Client requests no change to the provided value. 
+ RawValue; + false -> + % Pre-process the value as necessary. + case Section of + <<"admins">> -> + couch_passwords:hash_admin_password(RawValue); + _ -> + couch_util:trim(RawValue) + end + end, OldValue = config:get(Section, Key, ""), case config:set(Section, Key, ?b2l(Value), Persist) of - ok -> - send_json(Req, 200, list_to_binary(OldValue)); - Error -> - throw(Error) + ok -> + send_json(Req, 200, list_to_binary(OldValue)); + Error -> + throw(Error) end; - -handle_approved_config_req(#httpd{method='PUT'}=Req, _Persist, UseRawValue) -> +handle_approved_config_req(#httpd{method = 'PUT'} = Req, _Persist, UseRawValue) -> Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]), send_json(Req, 400, {[{error, ?l2b(Err)}]}); - % DELETE /_config/Section/Key -handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req, - Persist, _UseRawValue) -> +handle_approved_config_req( + #httpd{method = 'DELETE', path_parts = [_, Section, Key]} = Req, + Persist, + _UseRawValue +) -> case config:get(Section, Key, undefined) of - undefined -> - throw({not_found, unknown_config_value}); - OldValue -> - config:delete(Section, Key, Persist), - send_json(Req, 200, list_to_binary(OldValue)) + undefined -> + throw({not_found, unknown_config_value}); + OldValue -> + config:delete(Section, Key, Persist), + send_json(Req, 200, list_to_binary(OldValue)) end. - diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl index 33795a3a1..ecdf10562 100644 --- a/src/couch/src/couch_httpd_multipart.erl +++ b/src/couch/src/couch_httpd_multipart.erl @@ -30,47 +30,53 @@ decode_multipart_stream(ContentType, DataFun, Ref) -> ParentRef = erlang:monitor(process, Parent), put(mp_parent_ref, ParentRef), num_mp_writers(NumMpWriters), - {<<"--",_/binary>>, _, _} = couch_httpd:parse_multipart_request( - ContentType, DataFun, - fun(Next) -> mp_parse_doc(Next, []) end), + {<<"--", _/binary>>, _, _} = couch_httpd:parse_multipart_request( + ContentType, + DataFun, + fun(Next) -> mp_parse_doc(Next, []) end + ), unlink(Parent) - end), + end), Parser ! {get_doc_bytes, Ref, self()}, receive - {started_open_doc_revs, NewRef} -> - %% FIXME: How to remove the knowledge about this message? - {{started_open_doc_revs, NewRef}, Parser, ParserRef}; - {doc_bytes, Ref, DocBytes} -> - {{doc_bytes, Ref, DocBytes}, Parser, ParserRef}; - {'DOWN', ParserRef, _, _, normal} -> - ok; - {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} -> - couch_log:error("Multipart streamer ~p died with reason ~p", - [ParserRef, Msg]), - throw({Error, Msg}); - {'DOWN', ParserRef, _, _, Reason} -> - couch_log:error("Multipart streamer ~p died with reason ~p", - [ParserRef, Reason]), - throw({error, Reason}) + {started_open_doc_revs, NewRef} -> + %% FIXME: How to remove the knowledge about this message? + {{started_open_doc_revs, NewRef}, Parser, ParserRef}; + {doc_bytes, Ref, DocBytes} -> + {{doc_bytes, Ref, DocBytes}, Parser, ParserRef}; + {'DOWN', ParserRef, _, _, normal} -> + ok; + {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} -> + couch_log:error( + "Multipart streamer ~p died with reason ~p", + [ParserRef, Msg] + ), + throw({Error, Msg}); + {'DOWN', ParserRef, _, _, Reason} -> + couch_log:error( + "Multipart streamer ~p died with reason ~p", + [ParserRef, Reason] + ), + throw({error, Reason}) end. 
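The receive block closing decode_multipart_stream/3 above is the standard monitor-and-await pattern plus two multipart-specific messages. Stripped to its skeleton (our naming, not CouchDB's):

```erlang
%% Not CouchDB code: spawn a worker under a monitor and translate its
%% exit into ok / throw, as decode_multipart_stream/3 does for the parser.
-module(await_sketch).
-export([run/1]).

run(Fun) ->
    {Pid, Ref} = spawn_monitor(Fun),
    receive
        {'DOWN', Ref, process, Pid, normal} ->
            ok;
        {'DOWN', Ref, process, Pid, Reason} ->
            throw({error, Reason})
    end.
```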
- mp_parse_doc({headers, H}, []) -> case couch_util:get_value("content-type", H) of - {"application/json", _} -> - fun (Next) -> - mp_parse_doc(Next, []) - end; - _ -> - throw({bad_ctype, <<"Content-Type must be application/json">>}) + {"application/json", _} -> + fun(Next) -> + mp_parse_doc(Next, []) + end; + _ -> + throw({bad_ctype, <<"Content-Type must be application/json">>}) end; mp_parse_doc({body, Bytes}, AccBytes) -> - fun (Next) -> + fun(Next) -> mp_parse_doc(Next, [Bytes | AccBytes]) end; mp_parse_doc(body_end, AccBytes) -> - receive {get_doc_bytes, Ref, From} -> - From ! {doc_bytes, Ref, lists:reverse(AccBytes)} + receive + {get_doc_bytes, Ref, From} -> + From ! {doc_bytes, Ref, lists:reverse(AccBytes)} end, fun(Next) -> mp_parse_atts(Next, {Ref, [], 0, orddict:new(), []}) @@ -81,7 +87,7 @@ mp_parse_atts({headers, _}, Acc) -> mp_parse_atts(body_end, Acc) -> fun(Next) -> mp_parse_atts(Next, Acc) end; mp_parse_atts({body, Bytes}, {Ref, Chunks, Offset, Counters, Waiting}) -> - case maybe_send_data({Ref, Chunks++[Bytes], Offset, Counters, Waiting}) of + case maybe_send_data({Ref, Chunks ++ [Bytes], Offset, Counters, Waiting}) of abort_parsing -> fun(Next) -> mp_abort_parse_atts(Next, nil) end; NewAcc -> @@ -91,34 +97,34 @@ mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) -> N = num_mp_writers(), M = length(Counters), case (M == N) andalso Chunks == [] of - true -> - ok; - false -> - ParentRef = get(mp_parent_ref), - receive - abort_parsing -> + true -> ok; - {get_bytes, Ref, From} -> - C2 = update_writer(From, Counters), - case maybe_send_data({Ref, Chunks, Offset, C2, [From|Waiting]}) of - abort_parsing -> - ok; - NewAcc -> - mp_parse_atts(eof, NewAcc) - end; - {'DOWN', ParentRef, _, _, _} -> - exit(mp_reader_coordinator_died); - {'DOWN', WriterRef, _, WriterPid, _} -> - case remove_writer(WriterPid, WriterRef, Counters) of + false -> + ParentRef = get(mp_parent_ref), + receive abort_parsing -> ok; - C2 -> - NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]}, - mp_parse_atts(eof, NewAcc) + {get_bytes, Ref, From} -> + C2 = update_writer(From, Counters), + case maybe_send_data({Ref, Chunks, Offset, C2, [From | Waiting]}) of + abort_parsing -> + ok; + NewAcc -> + mp_parse_atts(eof, NewAcc) + end; + {'DOWN', ParentRef, _, _, _} -> + exit(mp_reader_coordinator_died); + {'DOWN', WriterRef, _, WriterPid, _} -> + case remove_writer(WriterPid, WriterRef, Counters) of + abort_parsing -> + ok; + C2 -> + NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]}, + mp_parse_atts(eof, NewAcc) + end + after 300000 -> + ok end - after 300000 -> - ok - end end. mp_abort_parse_atts(eof, _) -> @@ -127,82 +133,89 @@ mp_abort_parse_atts(_, _) -> fun(Next) -> mp_abort_parse_atts(Next, nil) end. maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) -> - receive {get_bytes, Ref, From} -> - NewCounters = update_writer(From, Counters), - maybe_send_data({Ref, Chunks, Offset, NewCounters, [From|Waiting]}) + receive + {get_bytes, Ref, From} -> + NewCounters = update_writer(From, Counters), + maybe_send_data({Ref, Chunks, Offset, NewCounters, [From | Waiting]}) after 0 -> % reply to as many writers as possible - NewWaiting = lists:filter(fun(Writer) -> - {_, WhichChunk} = orddict:fetch(Writer, Counters), - ListIndex = WhichChunk - Offset, - if ListIndex =< length(Chunks) -> - Writer ! 
{bytes, Ref, lists:nth(ListIndex, Chunks)}, - false; - true -> - true - end - end, Waiting), + NewWaiting = lists:filter( + fun(Writer) -> + {_, WhichChunk} = orddict:fetch(Writer, Counters), + ListIndex = WhichChunk - Offset, + if + ListIndex =< length(Chunks) -> + Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)}, + false; + true -> + true + end + end, + Waiting + ), % check if we can drop a chunk from the head of the list - SmallestIndex = case Counters of - [] -> - 0; - _ -> - lists:min([C || {_WPid, {_WRef, C}} <- Counters]) - end, + SmallestIndex = + case Counters of + [] -> + 0; + _ -> + lists:min([C || {_WPid, {_WRef, C}} <- Counters]) + end, Size = length(Counters), N = num_mp_writers(), - if Size == N andalso SmallestIndex == (Offset+1) -> - NewChunks = tl(Chunks), - NewOffset = Offset+1; - true -> - NewChunks = Chunks, - NewOffset = Offset + if + Size == N andalso SmallestIndex == (Offset + 1) -> + NewChunks = tl(Chunks), + NewOffset = Offset + 1; + true -> + NewChunks = Chunks, + NewOffset = Offset end, % we should wait for a writer if no one has written the last chunk LargestIndex = lists:max([0] ++ [C || {_WPid, {_WRef, C}} <- Counters]), - if LargestIndex >= (Offset + length(Chunks)) -> - % someone has written all possible chunks, keep moving - {Ref, NewChunks, NewOffset, Counters, NewWaiting}; - true -> - ParentRef = get(mp_parent_ref), - receive - abort_parsing -> - abort_parsing; - {'DOWN', ParentRef, _, _, _} -> - exit(mp_reader_coordinator_died); - {'DOWN', WriterRef, _, WriterPid, _} -> - case remove_writer(WriterPid, WriterRef, Counters) of + if + LargestIndex >= (Offset + length(Chunks)) -> + % someone has written all possible chunks, keep moving + {Ref, NewChunks, NewOffset, Counters, NewWaiting}; + true -> + ParentRef = get(mp_parent_ref), + receive abort_parsing -> abort_parsing; - C2 -> - RestWaiting = NewWaiting -- [WriterPid], - NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting}, - maybe_send_data(NewAcc) - end; - {get_bytes, Ref, X} -> - C2 = update_writer(X, Counters), - maybe_send_data({Ref, NewChunks, NewOffset, C2, [X|NewWaiting]}) - after 300000 -> - abort_parsing - end + {'DOWN', ParentRef, _, _, _} -> + exit(mp_reader_coordinator_died); + {'DOWN', WriterRef, _, WriterPid, _} -> + case remove_writer(WriterPid, WriterRef, Counters) of + abort_parsing -> + abort_parsing; + C2 -> + RestWaiting = NewWaiting -- [WriterPid], + NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting}, + maybe_send_data(NewAcc) + end; + {get_bytes, Ref, X} -> + C2 = update_writer(X, Counters), + maybe_send_data({Ref, NewChunks, NewOffset, C2, [X | NewWaiting]}) + after 300000 -> + abort_parsing + end end end. - update_writer(WriterPid, Counters) -> UpdateFun = fun({WriterRef, Count}) -> {WriterRef, Count + 1} end, - InitialValue = case orddict:find(WriterPid, Counters) of - {ok, IV} -> - IV; - error -> - WriterRef = erlang:monitor(process, WriterPid), - {WriterRef, 1} - end, + InitialValue = + case orddict:find(WriterPid, Counters) of + {ok, IV} -> + IV; + error -> + WriterRef = erlang:monitor(process, WriterPid), + {WriterRef, 1} + end, orddict:update(WriterPid, UpdateFun, InitialValue, Counters). - remove_writer(WriterPid, WriterRef, Counters) -> case orddict:find(WriterPid, Counters) of {ok, {WriterRef, _}} -> @@ -221,11 +234,9 @@ remove_writer(WriterPid, WriterRef, Counters) -> abort_parsing end. - num_mp_writers(N) -> erlang:put(mp_att_writers, N). 
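The bookkeeping reformatted in maybe_send_data/1 keeps a sliding window of chunks: the head chunk is released only after every registered writer has asked for it. The drop rule in isolation — a sketch that assumes all num_mp_writers() writers already appear in Counters:

```erlang
%% Not CouchDB code: the head-drop rule from maybe_send_data/1.
%% Counters maps WriterPid -> {MonitorRef, HighestChunkAskedFor};
%% the head of Chunks is chunk number Offset + 1.
-module(chunk_window_sketch).
-export([maybe_drop/3]).

maybe_drop(Chunks, Offset, Counters) when Counters =/= [] ->
    Smallest = lists:min([C || {_Pid, {_Ref, C}} <- Counters]),
    case Smallest == Offset + 1 of
        %% every writer has consumed the head chunk: release it
        true -> {tl(Chunks), Offset + 1};
        %% some writer still needs it: keep the window unchanged
        false -> {Chunks, Offset}
    end.
```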
- num_mp_writers() -> case erlang:get(mp_att_writers) of undefined -> 1; @@ -235,15 +246,21 @@ num_mp_writers() -> encode_multipart_stream(_Boundary, JsonBytes, [], WriteFun, _AttFun) -> WriteFun(JsonBytes); encode_multipart_stream(Boundary, JsonBytes, Atts, WriteFun, AttFun) -> - WriteFun([<<"--", Boundary/binary, - "\r\nContent-Type: application/json\r\n\r\n">>, - JsonBytes, <<"\r\n--", Boundary/binary>>]), + WriteFun([ + <<"--", Boundary/binary, "\r\nContent-Type: application/json\r\n\r\n">>, + JsonBytes, + <<"\r\n--", Boundary/binary>> + ]), atts_to_mp(Atts, Boundary, WriteFun, AttFun). atts_to_mp([], _Boundary, WriteFun, _AttFun) -> WriteFun(<<"--">>); -atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun, - AttFun) -> +atts_to_mp( + [{Att, Name, Len, Type, Encoding} | RestAtts], + Boundary, + WriteFun, + AttFun +) -> LengthBin = list_to_binary(integer_to_list(Len)), % write headers WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>), @@ -264,40 +281,52 @@ atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun, atts_to_mp(RestAtts, Boundary, WriteFun, AttFun). length_multipart_stream(Boundary, JsonBytes, Atts) -> - AttsSize = lists:foldl(fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) -> - AccAttsSize + - 4 + % "\r\n\r\n" - length(integer_to_list(Len)) + - Len + - 4 + % "\r\n--" - size(Boundary) + - % attachment headers - % (the length of the Content-Length has already been set) - size(Name) + - size(Type) + - length("\r\nContent-Disposition: attachment; filename=\"\"") + - length("\r\nContent-Type: ") + - length("\r\nContent-Length: ") + - case Encoding of - identity -> - 0; - _ -> - length(atom_to_list(Encoding)) + - length("\r\nContent-Encoding: ") - end - end, 0, Atts), - if AttsSize == 0 -> - {<<"application/json">>, iolist_size(JsonBytes)}; - true -> - {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>, - 2 + % "--" - size(Boundary) + - 36 + % "\r\ncontent-type: application/json\r\n\r\n" - iolist_size(JsonBytes) + - 4 + % "\r\n--" - size(Boundary) + - + AttsSize + - 2 % "--" + AttsSize = lists:foldl( + fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) -> + AccAttsSize + + % "\r\n\r\n" + 4 + + length(integer_to_list(Len)) + + Len + + % "\r\n--" + 4 + + size(Boundary) + + % attachment headers + % (the length of the Content-Length has already been set) + size(Name) + + size(Type) + + length("\r\nContent-Disposition: attachment; filename=\"\"") + + length("\r\nContent-Type: ") + + length("\r\nContent-Length: ") + + case Encoding of + identity -> + 0; + _ -> + length(atom_to_list(Encoding)) + + length("\r\nContent-Encoding: ") + end + end, + 0, + Atts + ), + if + AttsSize == 0 -> + {<<"application/json">>, iolist_size(JsonBytes)}; + true -> + { + <<"multipart/related; boundary=\"", Boundary/binary, "\"">>, + % "--" + 2 + + size(Boundary) + + % "\r\ncontent-type: application/json\r\n\r\n" + 36 + + iolist_size(JsonBytes) + + % "\r\n--" + 4 + + size(Boundary) + + +AttsSize + + % "--" + 2 } end. diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl index 40e5c9e3c..97f48a4c0 100644 --- a/src/couch/src/couch_httpd_rewrite.erl +++ b/src/couch/src/couch_httpd_rewrite.erl @@ -12,7 +12,6 @@ % % bind_path is based on bind method from Webmachine - %% @doc Module for URL rewriting by pattern matching. -module(couch_httpd_rewrite). @@ -25,7 +24,6 @@ -define(SEPARATOR, $\/). -define(MATCH_ALL, {bind, <<"*">>}). - %% doc The http rewrite handler. 
All rewriting is done from %% /dbname/_design/ddocname/_rewrite by default. %% @@ -110,13 +108,15 @@ %% "to": "/some/:foo", %% }} - - -handle_rewrite_req(#httpd{ - path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts], - method=Method, - mochi_req=MochiReq}=Req, _Db, DDoc) -> - +handle_rewrite_req( + #httpd{ + path_parts = [DbName, <<"_design">>, DesignName, _Rewrite | PathParts], + method = Method, + mochi_req = MochiReq + } = Req, + _Db, + DDoc +) -> % we are in a design handler DesignId = <<"_design/", DesignName/binary>>, Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>, @@ -131,19 +131,27 @@ handle_rewrite_req(#httpd{ erlang:put(?REWRITE_COUNT, RewritesSoFar + 1) end, - #doc{body={Props}} = DDoc, + #doc{body = {Props}} = DDoc, % get rules from ddoc case couch_util:get_value(<<"rewrites">>, Props) of undefined -> - couch_httpd:send_error(Req, 404, <<"rewrite_error">>, - <<"Invalid path.">>); + couch_httpd:send_error( + Req, + 404, + <<"rewrite_error">>, + <<"Invalid path.">> + ); Bin when is_binary(Bin) -> - couch_httpd:send_error(Req, 400, <<"rewrite_error">>, - <<"Rewrite rules are a String. They must be a JSON Array.">>); + couch_httpd:send_error( + Req, + 400, + <<"rewrite_error">>, + <<"Rewrite rules are a String. They must be a JSON Array.">> + ); Rules -> % create dispatch list from rules - DispatchList = [make_rule(Rule) || {Rule} <- Rules], + DispatchList = [make_rule(Rule) || {Rule} <- Rules], Method1 = couch_util:to_binary(Method), % get raw path by matching url to a rule. Throws not_found. @@ -155,39 +163,45 @@ handle_rewrite_req(#httpd{ Path0 = string:join(NewPathParts, [?SEPARATOR]), % if path is relative detect it and rewrite path - Path1 = case mochiweb_util:safe_relative_path(Path0) of - undefined -> - ?b2l(Prefix) ++ "/" ++ Path0; - P1 -> - ?b2l(Prefix) ++ "/" ++ P1 - end, + Path1 = + case mochiweb_util:safe_relative_path(Path0) of + undefined -> + ?b2l(Prefix) ++ "/" ++ Path0; + P1 -> + ?b2l(Prefix) ++ "/" ++ P1 + end, Path2 = normalize_path(Path1), - Path3 = case Bindings of - [] -> - Path2; - _ -> - [Path2, "?", mochiweb_util:urlencode(Bindings)] - end, + Path3 = + case Bindings of + [] -> + Path2; + _ -> + [Path2, "?", mochiweb_util:urlencode(Bindings)] + end, RawPath1 = ?b2l(iolist_to_binary(Path3)), % In order to do OAuth correctly, we have to save the % requested path. We use default so chained rewriting % wont replace the original header. - Headers = mochiweb_headers:default("x-couchdb-requested-path", - MochiReq:get(raw_path), - MochiReq:get(headers)), + Headers = mochiweb_headers:default( + "x-couchdb-requested-path", + MochiReq:get(raw_path), + MochiReq:get(headers) + ), couch_log:debug("rewrite to ~p ~n", [RawPath1]), % build a new mochiweb request - MochiReq1 = mochiweb_request:new(MochiReq:get(socket), - MochiReq:get(method), - RawPath1, - MochiReq:get(version), - Headers), + MochiReq1 = mochiweb_request:new( + MochiReq:get(socket), + MochiReq:get(method), + RawPath1, + MochiReq:get(version), + Headers + ), % cleanup, It force mochiweb to reparse raw uri. MochiReq1:cleanup(), @@ -198,14 +212,19 @@ handle_rewrite_req(#httpd{ default_fun = DefaultFun, url_handlers = UrlHandlers, user_ctx = UserCtx, - auth = Auth + auth = Auth } = Req, erlang:put(pre_rewrite_auth, Auth), erlang:put(pre_rewrite_user_ctx, UserCtx), - couch_httpd:handle_request_int(MochiReq1, DefaultFun, - UrlHandlers, DbUrlHandlers, DesignUrlHandlers) - end. 
+ couch_httpd:handle_request_int( + MochiReq1, + DefaultFun, + UrlHandlers, + DbUrlHandlers, + DesignUrlHandlers + ) + end. quote_plus({bind, X}) -> mochiweb_util:quote_plus(X); @@ -216,7 +235,7 @@ quote_plus(X) -> %% 404 error not_found is raised try_bind_path([], _Method, _PathParts, _QueryList) -> throw(not_found); -try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) -> +try_bind_path([Dispatch | Rest], Method, PathParts, QueryList) -> [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch, case bind_method(Method1, Method) of true -> @@ -225,22 +244,35 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) -> Bindings1 = Bindings ++ QueryList, % we parse query args from the rule and fill % it eventually with bindings vars - QueryArgs1 = make_query_list(QueryArgs, Bindings1, - Formats, []), + QueryArgs1 = make_query_list( + QueryArgs, + Bindings1, + Formats, + [] + ), % remove params in QueryLists1 that are already in % QueryArgs1 - Bindings2 = lists:foldl(fun({K, V}, Acc) -> - K1 = to_binding(K), - KV = case couch_util:get_value(K1, QueryArgs1) of - undefined -> [{K1, V}]; - _V1 -> [] + Bindings2 = lists:foldl( + fun({K, V}, Acc) -> + K1 = to_binding(K), + KV = + case couch_util:get_value(K1, QueryArgs1) of + undefined -> [{K1, V}]; + _V1 -> [] + end, + Acc ++ KV end, - Acc ++ KV - end, [], Bindings1), + [], + Bindings1 + ), FinalBindings = Bindings2 ++ QueryArgs1, - NewPathParts = make_new_path(RedirectPath, FinalBindings, - Remaining, []), + NewPathParts = make_new_path( + RedirectPath, + FinalBindings, + Remaining, + [] + ), {NewPathParts, FinalBindings}; fail -> try_bind_path(Rest, Method, PathParts, QueryList) @@ -254,37 +286,51 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) -> %% passed in url. make_query_list([], _Bindings, _Formats, Acc) -> Acc; -make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) -> +make_query_list([{Key, {Value}} | Rest], Bindings, Formats, Acc) -> Value1 = {Value}, - make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]); -make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) -> + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]); +make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_binary(Value) -> Value1 = replace_var(Value, Bindings, Formats), - make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]); -make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) -> + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]); +make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_list(Value) -> Value1 = replace_var(Value, Bindings, Formats), - make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]); -make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) -> - make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]). + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]); +make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) -> + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value} | Acc]). 
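For orientation before the rewrite-handler diff continues: a decoded "rewrites" rule is an EJSON object, and the handler builds its dispatch list with a comprehension over such rules. The value below is illustrative, not taken from the diff; per make_rule/1 further down, every key except "to" is optional:

```
Rules = [
    {[
        {<<"from">>, <<"/a/:foo">>},
        {<<"to">>, <<"/some/:foo">>},
        {<<"method">>, <<"GET">>}
    ]}
],
%% mirrors the comprehension in handle_rewrite_req/3 above
DispatchList = [make_rule(Rule) || {Rule} <- Rules].
```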
-replace_var(<<"*">>=Value, Bindings, Formats) -> +replace_var(<<"*">> = Value, Bindings, Formats) -> get_var(Value, Bindings, Value, Formats); replace_var(<<":", Var/binary>> = Value, Bindings, Formats) -> get_var(Var, Bindings, Value, Formats); replace_var(Value, _Bindings, _Formats) when is_binary(Value) -> Value; replace_var(Value, Bindings, Formats) when is_list(Value) -> - lists:reverse(lists:foldl(fun - (<<":", Var/binary>>=Value1, Acc) -> - [get_var(Var, Bindings, Value1, Formats)|Acc]; + lists:reverse( + lists:foldl( + fun + (<<":", Var/binary>> = Value1, Acc) -> + [get_var(Var, Bindings, Value1, Formats) | Acc]; (Value1, Acc) -> - [Value1|Acc] - end, [], Value)); + [Value1 | Acc] + end, + [], + Value + ) + ); replace_var(Value, _Bindings, _Formats) -> Value. - + maybe_json(Key, Value) -> - case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>, - <<"endkey">>, <<"end_key">>, <<"keys">>]) of + case + lists:member(Key, [ + <<"key">>, + <<"startkey">>, + <<"start_key">>, + <<"endkey">>, + <<"end_key">>, + <<"keys">> + ]) + of true -> ?JSON_ENCODE(Value); false -> @@ -299,7 +345,7 @@ get_var(VarName, Props, Default, Formats) -> maybe_format(VarName, Value, Formats) -> case couch_util:get_value(VarName, Formats) of undefined -> - Value; + Value; Format -> format(Format, Value) end. @@ -324,7 +370,7 @@ format(<<"bool">>, Value) when is_list(Value) -> _ -> Value end; format(_Format, Value) -> - Value. + Value. %% doc: build new patch from bindings. bindings are query args %% (+ dynamic query rewritten if needed) and bindings found in @@ -334,94 +380,103 @@ make_new_path([], _Bindings, _Remaining, Acc) -> make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) -> Acc1 = lists:reverse(Acc) ++ Remaining, Acc1; -make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) -> +make_new_path([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) -> Acc1 = lists:reverse(Acc) ++ Remaining, Acc1; -make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) -> - P2 = case couch_util:get_value({bind, P}, Bindings) of - undefined -> << "undefined">>; - P1 -> - iolist_to_binary(P1) - end, - make_new_path(Rest, Bindings, Remaining, [P2|Acc]); -make_new_path([P|Rest], Bindings, Remaining, Acc) -> - make_new_path(Rest, Bindings, Remaining, [P|Acc]). - +make_new_path([{bind, P} | Rest], Bindings, Remaining, Acc) -> + P2 = + case couch_util:get_value({bind, P}, Bindings) of + undefined -> <<"undefined">>; + P1 -> iolist_to_binary(P1) + end, + make_new_path(Rest, Bindings, Remaining, [P2 | Acc]); +make_new_path([P | Rest], Bindings, Remaining, Acc) -> + make_new_path(Rest, Bindings, Remaining, [P | Acc]). %% @doc If method of the query fith the rule method. If the %% method rule is '*', which is the default, all %% request method will bind. It allows us to make rules %% depending on HTTP method. -bind_method(?MATCH_ALL, _Method ) -> +bind_method(?MATCH_ALL, _Method) -> true; bind_method({bind, Method}, Method) -> true; bind_method(_, _) -> false. - %% @doc bind path. 
Using the rule from we try to bind variables given %% to the current url by pattern matching bind_path([], [], Bindings) -> {ok, [], Bindings}; -bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) -> - {ok, Rest, [{?MATCH_ALL, Match}|Bindings]}; +bind_path([?MATCH_ALL], [Match | _RestMatch] = Rest, Bindings) -> + {ok, Rest, [{?MATCH_ALL, Match} | Bindings]}; bind_path(_, [], _) -> fail; -bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) -> - bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]); -bind_path([Token|RestToken], [Token|RestMatch], Bindings) -> +bind_path([{bind, Token} | RestToken], [Match | RestMatch], Bindings) -> + bind_path(RestToken, RestMatch, [{{bind, Token}, Match} | Bindings]); +bind_path([Token | RestToken], [Token | RestMatch], Bindings) -> bind_path(RestToken, RestMatch, Bindings); bind_path(_, _, _) -> fail. - %% normalize path. -normalize_path(Path) -> - "/" ++ string:join(normalize_path1(string:tokens(Path, - "/"), []), [?SEPARATOR]). - +normalize_path(Path) -> + "/" ++ + string:join( + normalize_path1( + string:tokens( + Path, + "/" + ), + [] + ), + [?SEPARATOR] + ). normalize_path1([], Acc) -> lists:reverse(Acc); -normalize_path1([".."|Rest], Acc) -> - Acc1 = case Acc of - [] -> [".."|Acc]; - [T|_] when T =:= ".." -> [".."|Acc]; - [_|R] -> R - end, +normalize_path1([".." | Rest], Acc) -> + Acc1 = + case Acc of + [] -> [".." | Acc]; + [T | _] when T =:= ".." -> [".." | Acc]; + [_ | R] -> R + end, normalize_path1(Rest, Acc1); -normalize_path1(["."|Rest], Acc) -> +normalize_path1(["." | Rest], Acc) -> normalize_path1(Rest, Acc); -normalize_path1([Path|Rest], Acc) -> - normalize_path1(Rest, [Path|Acc]). - +normalize_path1([Path | Rest], Acc) -> + normalize_path1(Rest, [Path | Acc]). %% @doc transform json rule in erlang for pattern matching make_rule(Rule) -> - Method = case couch_util:get_value(<<"method">>, Rule) of - undefined -> ?MATCH_ALL; - M -> to_binding(M) - end, - QueryArgs = case couch_util:get_value(<<"query">>, Rule) of - undefined -> []; - {Args} -> Args + Method = + case couch_util:get_value(<<"method">>, Rule) of + undefined -> ?MATCH_ALL; + M -> to_binding(M) end, - FromParts = case couch_util:get_value(<<"from">>, Rule) of - undefined -> [?MATCH_ALL]; - From -> - parse_path(From) + QueryArgs = + case couch_util:get_value(<<"query">>, Rule) of + undefined -> []; + {Args} -> Args end, - ToParts = case couch_util:get_value(<<"to">>, Rule) of - undefined -> - throw({error, invalid_rewrite_target}); - To -> - parse_path(To) + FromParts = + case couch_util:get_value(<<"from">>, Rule) of + undefined -> [?MATCH_ALL]; + From -> parse_path(From) + end, + ToParts = + case couch_util:get_value(<<"to">>, Rule) of + undefined -> + throw({error, invalid_rewrite_target}); + To -> + parse_path(To) + end, + Formats = + case couch_util:get_value(<<"formats">>, Rule) of + undefined -> []; + {Fmts} -> Fmts end, - Formats = case couch_util:get_value(<<"formats">>, Rule) of - undefined -> []; - {Fmts} -> Fmts - end, [{FromParts, Method}, ToParts, QueryArgs, Formats]. parse_path(Path) -> @@ -433,43 +488,59 @@ parse_path(Path) -> %% in erlang atom. 
path_to_list([], Acc, _DotDotCount) -> lists:reverse(Acc); -path_to_list([<<>>|R], Acc, DotDotCount) -> +path_to_list([<<>> | R], Acc, DotDotCount) -> path_to_list(R, Acc, DotDotCount); -path_to_list([<<"*">>|R], Acc, DotDotCount) -> - path_to_list(R, [?MATCH_ALL|Acc], DotDotCount); -path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 -> +path_to_list([<<"*">> | R], Acc, DotDotCount) -> + path_to_list(R, [?MATCH_ALL | Acc], DotDotCount); +path_to_list([<<"..">> | R], Acc, DotDotCount) when DotDotCount == 2 -> case chttpd_util:get_chttpd_config_boolean("secure_rewrites", true) of false -> - path_to_list(R, [<<"..">>|Acc], DotDotCount+1); + path_to_list(R, [<<"..">> | Acc], DotDotCount + 1); true -> - couch_log:info("insecure_rewrite_rule ~p blocked", - [lists:reverse(Acc) ++ [<<"..">>] ++ R]), + couch_log:info( + "insecure_rewrite_rule ~p blocked", + [lists:reverse(Acc) ++ [<<"..">>] ++ R] + ), throw({insecure_rewrite_rule, "too many ../.. segments"}) end; -path_to_list([<<"..">>|R], Acc, DotDotCount) -> - path_to_list(R, [<<"..">>|Acc], DotDotCount+1); -path_to_list([P|R], Acc, DotDotCount) -> - P1 = case P of - <<":", Var/binary>> -> - to_binding(Var); - _ -> P - end, - path_to_list(R, [P1|Acc], DotDotCount). +path_to_list([<<"..">> | R], Acc, DotDotCount) -> + path_to_list(R, [<<"..">> | Acc], DotDotCount + 1); +path_to_list([P | R], Acc, DotDotCount) -> + P1 = + case P of + <<":", Var/binary>> -> + to_binding(Var); + _ -> + P + end, + path_to_list(R, [P1 | Acc], DotDotCount). maybe_encode_bindings([]) -> []; -maybe_encode_bindings(Props) -> - lists:foldl(fun +maybe_encode_bindings(Props) -> + lists:foldl( + fun ({{bind, <<"*">>}, _V}, Acc) -> Acc; ({{bind, K}, V}, Acc) -> V1 = iolist_to_binary(maybe_json(K, V)), - [{K, V1}|Acc] - end, [], Props). - -decode_query_value({K,V}) -> - case lists:member(K, ["key", "startkey", "start_key", - "endkey", "end_key", "keys"]) of + [{K, V1} | Acc] + end, + [], + Props + ). + +decode_query_value({K, V}) -> + case + lists:member(K, [ + "key", + "startkey", + "start_key", + "endkey", + "end_key", + "keys" + ]) + of true -> {to_binding(K), ?JSON_DECODE(V)}; false -> diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl index 409631d25..0bff6a36d 100644 --- a/src/couch/src/couch_httpd_vhost.erl +++ b/src/couch/src/couch_httpd_vhost.erl @@ -33,9 +33,10 @@ -define(RELISTEN_DELAY, 5000). -record(vhosts_state, { - vhosts, - vhost_globals, - vhosts_fun}). + vhosts, + vhost_globals, + vhosts_fun +}). %% doc the vhost manager. 
 %% This gen_server keeps the state of vhosts added to the ini and tries to
@@ -109,34 +110,44 @@ dispatch_host_int(MochiReq) ->
     #vhosts_state{
         vhost_globals = VHostGlobals,
         vhosts = VHosts,
-        vhosts_fun=Fun} = get_state(),
+        vhosts_fun = Fun
+    } = get_state(),

     {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
-    VPathParts  = string:tokens(VPath, "/"),
+    VPathParts = string:tokens(VPath, "/"),

     VHost = host(MochiReq),
     {VHostParts, VhostPort} = split_host_port(VHost),
-    FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
-            VhostPort, VPathParts) of
-        no_vhost_matched -> MochiReq;
-        {VhostTarget, NewPath} ->
-            case vhost_global(VHostGlobals, MochiReq) of
-                true ->
-                    MochiReq;
-                _Else ->
-                    NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
-                        Fragment}),
-                    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                        MochiReq:get(method),
-                        NewPath1,
-                        MochiReq:get(version),
-                        MochiReq:get(headers)),
-                    Fun(MochiReq1, VhostTarget)
-            end
-    end,
+    FinalMochiReq =
+        case
+            try_bind_vhost(
+                VHosts,
+                lists:reverse(VHostParts),
+                VhostPort,
+                VPathParts
+            )
+        of
+            no_vhost_matched ->
+                MochiReq;
+            {VhostTarget, NewPath} ->
+                case vhost_global(VHostGlobals, MochiReq) of
+                    true ->
+                        MochiReq;
+                    _Else ->
+                        NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query, Fragment}),
+                        MochiReq1 = mochiweb_request:new(
+                            MochiReq:get(socket),
+                            MochiReq:get(method),
+                            NewPath1,
+                            MochiReq:get(version),
+                            MochiReq:get(headers)
+                        ),
+                        Fun(MochiReq1, VhostTarget)
+                end
+        end,
     FinalMochiReq.

-append_path("/"=_Target, "/"=_Path) ->
+append_path("/" = _Target, "/" = _Path) ->
     "/";
 append_path(Target, Path) ->
     Target ++ Path.

@@ -148,15 +159,20 @@ redirect_to_vhost(MochiReq, VhostTarget) ->
     couch_log:debug("Vhost Target: '~p'~n", [Target]),

-    Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
-        MochiReq:get(headers)),
+    Headers = mochiweb_headers:enter(
+        "x-couchdb-vhost-path",
+        Path,
+        MochiReq:get(headers)
+    ),

     % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-        MochiReq:get(method),
-        Target,
-        MochiReq:get(version),
-        Headers),
+    MochiReq1 = mochiweb_request:new(
+        MochiReq:get(socket),
+        MochiReq:get(method),
+        Target,
+        MochiReq:get(version),
+        Headers
+    ),

     % cleanup; it forces mochiweb to reparse the raw uri.
     MochiReq1:cleanup(),
     MochiReq1.

@@ -164,23 +180,25 @@
 %% if so, then it will not be rewritten, but will run as a normal couchdb request.
 %% normally you'd use this for _uuids, _utils, and a few of the others you want to
 %% keep available on vhosts. You can also use it to make databases 'global'.
-vhost_global( VhostGlobals, MochiReq) ->
+vhost_global(VhostGlobals, MochiReq) ->
     RawUri = MochiReq:get(raw_path),
     {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),

-    Front = case couch_httpd:partition(Path) of
-        {"", "", ""} ->
-            "/"; % Special case the root url handler
-        {FirstPart, _, _} ->
-            FirstPart
-    end,
-    [true] == [true||V <- VhostGlobals, V == Front].
+    Front =
+        case couch_httpd:partition(Path) of
+            {"", "", ""} ->
+                % Special case the root url handler
+                "/";
+            {FirstPart, _, _} ->
+                FirstPart
+        end,
+    [true] == [true || V <- VhostGlobals, V == Front].

 %% bind host
 %% first it tries to bind the port, then the hostname.
try_bind_vhost([], _HostParts, _Port, _PathParts) -> no_vhost_matched; -try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) -> +try_bind_vhost([VhostSpec | Rest], HostParts, Port, PathParts) -> {{VHostParts, VPort, VPath}, Path} = VhostSpec, case bind_port(VPort, Port) of ok -> @@ -191,12 +209,18 @@ try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) -> Path1 = make_target(Path, Bindings, Remainings, []), {make_path(Path1), make_path(PathParts1)}; fail -> - try_bind_vhost(Rest, HostParts, Port, - PathParts) + try_bind_vhost( + Rest, + HostParts, + Port, + PathParts + ) end; - fail -> try_bind_vhost(Rest, HostParts, Port, PathParts) + fail -> + try_bind_vhost(Rest, HostParts, Port, PathParts) end; - fail -> try_bind_vhost(Rest, HostParts, Port, PathParts) + fail -> + try_bind_vhost(Rest, HostParts, Port, PathParts) end. %% doc: build new patch from bindings. bindings are query args @@ -209,72 +233,82 @@ make_target([], _Bindings, _Remaining, Acc) -> make_target([?MATCH_ALL], _Bindings, Remaining, Acc) -> Acc1 = lists:reverse(Acc) ++ Remaining, Acc1; -make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) -> +make_target([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) -> Acc1 = lists:reverse(Acc) ++ Remaining, Acc1; -make_target([{bind, P}|Rest], Bindings, Remaining, Acc) -> - P2 = case couch_util:get_value({bind, P}, Bindings) of - undefined -> "undefined"; - P1 -> P1 - end, - make_target(Rest, Bindings, Remaining, [P2|Acc]); -make_target([P|Rest], Bindings, Remaining, Acc) -> - make_target(Rest, Bindings, Remaining, [P|Acc]). +make_target([{bind, P} | Rest], Bindings, Remaining, Acc) -> + P2 = + case couch_util:get_value({bind, P}, Bindings) of + undefined -> "undefined"; + P1 -> P1 + end, + make_target(Rest, Bindings, Remaining, [P2 | Acc]); +make_target([P | Rest], Bindings, Remaining, Acc) -> + make_target(Rest, Bindings, Remaining, [P | Acc]). %% bind port bind_port(Port, Port) -> ok; bind_port('*', _) -> ok; -bind_port(_,_) -> fail. +bind_port(_, _) -> fail. %% bind bhost -bind_vhost([],[], Bindings) -> {ok, Bindings, []}; -bind_vhost([?MATCH_ALL], [], _Bindings) -> fail; -bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest}; -bind_vhost([], _HostParts, _Bindings) -> fail; -bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) -> - bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]); -bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) -> +bind_vhost([], [], Bindings) -> + {ok, Bindings, []}; +bind_vhost([?MATCH_ALL], [], _Bindings) -> + fail; +bind_vhost([?MATCH_ALL], Rest, Bindings) -> + {ok, Bindings, Rest}; +bind_vhost([], _HostParts, _Bindings) -> + fail; +bind_vhost([{bind, Token} | Rest], [Match | RestHost], Bindings) -> + bind_vhost(Rest, RestHost, [{{bind, Token}, Match} | Bindings]); +bind_vhost([Cname | Rest], [Cname | RestHost], Bindings) -> bind_vhost(Rest, RestHost, Bindings); -bind_vhost(_, _, _) -> fail. +bind_vhost(_, _, _) -> + fail. %% bind path bind_path([], PathParts) -> {ok, PathParts}; bind_path(_VPathParts, []) -> fail; -bind_path([Path|VRest],[Path|Rest]) -> - bind_path(VRest, Rest); +bind_path([Path | VRest], [Path | Rest]) -> + bind_path(VRest, Rest); bind_path(_, _) -> fail. 
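The bind_path/2 clauses above do a plain prefix match over path segments, returning the unmatched tail so it can be appended to the rewrite target. An illustrative trace (bind_path/2 is module-internal, so these are expected results rather than exported calls):

```
%% bind_path(VHostPathSpec, RequestPathParts)
%%   bind_path([],         ["a", "b"]) => {ok, ["a", "b"]}
%%   bind_path(["a"],      ["a", "b"]) => {ok, ["b"]}
%%   bind_path(["a", "x"], ["a", "b"]) => fail
```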
% utilities - %% create vhost list from ini host(MochiReq) -> XHost = chttpd_util:get_chttpd_config( - "x_forwarded_host", "X-Forwarded-Host"), + "x_forwarded_host", "X-Forwarded-Host" + ), case MochiReq:get_header_value(XHost) of undefined -> case MochiReq:get_header_value("Host") of undefined -> []; Value1 -> Value1 end; - Value -> Value + Value -> + Value end. make_vhosts() -> - Vhosts = lists:foldl(fun - ({_, ""}, Acc) -> - Acc; - ({Vhost, Path}, Acc) -> - [{parse_vhost(Vhost), split_path(Path)}|Acc] - end, [], config:get("vhosts")), + Vhosts = lists:foldl( + fun + ({_, ""}, Acc) -> + Acc; + ({Vhost, Path}, Acc) -> + [{parse_vhost(Vhost), split_path(Path)} | Acc] + end, + [], + config:get("vhosts") + ), lists:reverse(lists:usort(Vhosts)). - parse_vhost(Vhost) -> case urlsplit_netloc(Vhost, []) of {[], Path} -> @@ -289,15 +323,21 @@ parse_vhost(Vhost) -> {H1, P, string:tokens(Path, "/")} end. - split_host_port(HostAsString) -> case string:rchr(HostAsString, $:) of 0 -> {split_host(HostAsString), '*'}; N -> - HostPart = string:substr(HostAsString, 1, N-1), - case (catch erlang:list_to_integer(string:substr(HostAsString, - N+1, length(HostAsString)))) of + HostPart = string:substr(HostAsString, 1, N - 1), + case + (catch erlang:list_to_integer( + string:substr( + HostAsString, + N + 1, + length(HostAsString) + ) + )) + of {'EXIT', _} -> {split_host(HostAsString), '*'}; Port -> @@ -311,36 +351,34 @@ split_host(HostAsString) -> split_path(Path) -> make_spec(string:tokens(Path, "/"), []). - make_spec([], Acc) -> lists:reverse(Acc); -make_spec([""|R], Acc) -> +make_spec(["" | R], Acc) -> make_spec(R, Acc); -make_spec(["*"|R], Acc) -> - make_spec(R, [?MATCH_ALL|Acc]); -make_spec([P|R], Acc) -> +make_spec(["*" | R], Acc) -> + make_spec(R, [?MATCH_ALL | Acc]); +make_spec([P | R], Acc) -> P1 = parse_var(P), - make_spec(R, [P1|Acc]). - + make_spec(R, [P1 | Acc]). parse_var(P) -> case P of ":" ++ Var -> {bind, Var}; - _ -> P + _ -> + P end. - % mochiweb doesn't export it. urlsplit_netloc("", Acc) -> {lists:reverse(Acc), ""}; -urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# -> +urlsplit_netloc(Rest = [C | _], Acc) when C =:= $/; C =:= $?; C =:= $# -> {lists:reverse(Acc), Rest}; urlsplit_netloc([C | Rest], Acc) -> urlsplit_netloc(Rest, [C | Acc]). make_path(Parts) -> - "/" ++ string:join(Parts,[?SEPARATOR]). + "/" ++ string:join(Parts, [?SEPARATOR]). init(_) -> ok = config:listen_for_changes(?MODULE, nil), @@ -348,17 +386,19 @@ init(_) -> %% load configuration {VHostGlobals, VHosts, Fun} = load_conf(), State = #vhosts_state{ - vhost_globals=VHostGlobals, - vhosts=VHosts, - vhosts_fun=Fun}, + vhost_globals = VHostGlobals, + vhosts = VHosts, + vhosts_fun = Fun + }, {ok, State}. handle_call(reload, _From, _State) -> {VHostGlobals, VHosts, Fun} = load_conf(), {reply, ok, #vhosts_state{ - vhost_globals=VHostGlobals, - vhosts=VHosts, - vhosts_fun=Fun}}; + vhost_globals = VHostGlobals, + vhosts = VHosts, + vhosts_fun = Fun + }}; handle_call(get_state, _From, State) -> {reply, State, State}; handle_call(_Msg, _From, State) -> @@ -379,7 +419,6 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. 
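To make the fold in make_vhosts/0 above concrete: with a hypothetical local.ini section such as the one sketched below (names invented for illustration), config:get("vhosts") hands back a string proplist, and the first fun clause skips entries whose path is empty:

```
%% [vhosts]
%% example.com = /exampledb
%% :dbname.example.com = /:dbname
%%
%% config:get("vhosts") would then return roughly:
[{"example.com", "/exampledb"},
 {":dbname.example.com", "/:dbname"}]
```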
- handle_config_change("vhosts", _, _, _, _) -> {ok, ?MODULE:reload()}; handle_config_change(_, _, _, _, _) -> @@ -392,8 +431,11 @@ handle_config_terminate(_Server, _Reason, _State) -> load_conf() -> %% get vhost globals - VHostGlobals = re:split("_utils, _uuids, _session, _users", "\\s*,\\s*", - [{return, list}]), + VHostGlobals = re:split( + "_utils, _uuids, _session, _users", + "\\s*,\\s*", + [{return, list}] + ), %% build vhosts matching rules VHosts = make_vhosts(), diff --git a/src/couch/src/couch_io_logger.erl b/src/couch/src/couch_io_logger.erl index 188e031cb..f859874b6 100644 --- a/src/couch/src/couch_io_logger.erl +++ b/src/couch/src/couch_io_logger.erl @@ -20,7 +20,6 @@ stop_error/1 ]). - start(undefined) -> ok; start(Dir) -> @@ -42,7 +41,6 @@ start(Dir) -> ok end. - stop_noerror() -> case get(logger_path) of undefined -> @@ -51,7 +49,6 @@ stop_noerror() -> close_logs() end. - stop_error(Err) -> case get(logger_path) of undefined -> @@ -61,21 +58,17 @@ stop_error(Err) -> close_logs() end. - log_output(Data) -> log(get(logger_out_fd), Data). - log_input(Data) -> log(get(logger_in_fd), Data). - unix_time() -> {Mega, Sec, USec} = os:timestamp(), UnixTs = (Mega * 1000000 + Sec) * 1000000 + USec, integer_to_list(UnixTs). - log_name() -> Ts = unix_time(), Pid0 = erlang:pid_to_list(self()), @@ -83,12 +76,10 @@ log_name() -> Pid2 = string:strip(Pid1, right, $>), lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])). - close_logs() -> file:close(get(logger_out_fd)), file:close(get(logger_in_fd)). - save_error_logs(Path, Err) -> Otp = erlang:system_info(otp_release), Msg = io_lib:format("Error: ~p~nNode: ~p~nOTP: ~p~n", [Err, node(), Otp]), @@ -97,10 +88,9 @@ save_error_logs(Path, Err) -> OFd = get(logger_in_fd), file:position(IFd, 0), file:position(OFd, 0), - file:copy(IFd, Path ++ ".out.log"), + file:copy(IFd, Path ++ ".out.log"), file:copy(OFd, Path ++ ".in.log"). - log(undefined, _Data) -> ok; log(Fd, Data) -> diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl index 94150418e..84c786148 100644 --- a/src/couch/src/couch_key_tree.erl +++ b/src/couch/src/couch_key_tree.erl @@ -48,41 +48,43 @@ -module(couch_key_tree). -export([ -count_leafs/1, -find_missing/2, -fold/3, -get/2, -get_all_leafs/1, -get_all_leafs_full/1, -get_full_key_paths/2, -get_key_leafs/2, -map/2, -map_leafs/2, -mapfold/3, -multi_merge/2, -merge/2, -remove_leafs/2, -stem/2 + count_leafs/1, + find_missing/2, + fold/3, + get/2, + get_all_leafs/1, + get_all_leafs_full/1, + get_full_key_paths/2, + get_key_leafs/2, + map/2, + map_leafs/2, + mapfold/3, + multi_merge/2, + merge/2, + remove_leafs/2, + stem/2 ]). -include_lib("couch/include/couch_db.hrl"). --type treenode() :: {Key::term(), Value::term(), [Node::treenode()]}. --type tree() :: {Depth::pos_integer(), [treenode()]}. +-type treenode() :: {Key :: term(), Value :: term(), [Node :: treenode()]}. +-type tree() :: {Depth :: pos_integer(), [treenode()]}. -type revtree() :: [tree()]. - %% @doc Merge multiple paths into the given tree. -spec multi_merge(revtree(), tree()) -> revtree(). multi_merge(RevTree, Trees) -> - lists:foldl(fun(Tree, RevTreeAcc) -> - {NewRevTree, _} = merge(RevTreeAcc, Tree), - NewRevTree - end, RevTree, lists:sort(Trees)). - + lists:foldl( + fun(Tree, RevTreeAcc) -> + {NewRevTree, _} = merge(RevTreeAcc, Tree), + NewRevTree + end, + RevTree, + lists:sort(Trees) + ). %% @doc Merge a path into a tree. -spec merge(revtree(), tree() | path()) -> - {revtree(), new_leaf | new_branch | internal_node}. 
+ {revtree(), new_leaf | new_branch | internal_node}. merge(RevTree, Tree) -> {Merged, Result} = merge_tree(RevTree, Tree, []), {lists:sort(Merged), Result}. @@ -92,12 +94,12 @@ merge(RevTree, Tree) -> %% If it can't find a branch that the new tree merges into, add it as a %% new branch in the RevTree. -spec merge_tree(revtree(), tree() | path(), revtree()) -> - {revtree(), new_leaf | new_branch | internal_node}. + {revtree(), new_leaf | new_branch | internal_node}. merge_tree([], Tree, []) -> {[Tree], new_leaf}; merge_tree([], Tree, MergeAcc) -> - {[Tree|MergeAcc], new_branch}; -merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes}=Tree, MergeAcc) -> + {[Tree | MergeAcc], new_branch}; +merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes} = Tree, MergeAcc) -> % For the intrepid observer following along at home, notice what we're % doing here with (Depth - IDepth). This tells us which of the two % branches (Nodes or INodes) we need to seek into. If Depth > IDepth @@ -125,7 +127,7 @@ merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes}=Tree, MergeAcc) -> %% ends up running out of nodes we know that these two branches can %% not be merged. -spec merge_at([node()], integer(), [node()]) -> - {revtree(), new_leaf | new_branch | internal_node} | fail. + {revtree(), new_leaf | new_branch | internal_node} | fail. merge_at(_Nodes, _Pos, []) -> fail; merge_at([], _Pos, _INodes) -> @@ -172,7 +174,7 @@ merge_at([Tree | Sibs], 0, INodes) -> end. -spec merge_extend(revtree(), revtree()) -> - {revtree(), new_leaf | new_branch | internal_node}. + {revtree(), new_leaf | new_branch | internal_node}. merge_extend([], B) when B =/= [] -> % Most likely the insert branch simply extends this one, so the new % branch is exactly B. Its also possible that B is a branch because @@ -189,7 +191,7 @@ merge_extend([{K, V1, SubA} | NextA], [{K, V2, SubB}]) -> % level in the two branches. {Merged, Result} = merge_extend(SubA, SubB), {[{K, value_pref(V1, V2), Merged} | NextA], Result}; -merge_extend([{K1, _, _}=NodeA | Rest], [{K2, _, _}=NodeB]) when K1 > K2 -> +merge_extend([{K1, _, _} = NodeA | Rest], [{K2, _, _} = NodeB]) when K1 > K2 -> % Keys are ordered so we know this is where the insert branch needs % to be inserted into the tree. We also know that this creates a new % branch so we have a new leaf to report. @@ -200,10 +202,11 @@ merge_extend([Tree | RestA], NextB) -> % key in NextB might be larger than the largest key in RestA which % means we've created a new branch. {Merged, Result0} = merge_extend(RestA, NextB), - Result = case length(Merged) == length(RestA) of - true -> Result0; - false -> new_branch - end, + Result = + case length(Merged) == length(RestA) of + true -> Result0; + false -> new_branch + end, {[Tree | Merged], Result}. find_missing(_Tree, []) -> @@ -228,17 +231,17 @@ find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) -> SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2), ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3). 
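A concrete instance of the revtree()/tree() types above may help when reading the traversal functions that follow. The revision ids and the value x are made up; the assertion assumes couch_key_tree is loaded:

```
%% One tree rooted at depth 1: rev "1-a" has two children, so the
%% document has two conflicting leaf revisions.
RevTree = [
    {1, {<<"1-a">>, x, [
        {<<"2-b">>, x, []},
        {<<"2-c">>, x, []}
    ]}}
],
2 = couch_key_tree:count_leafs(RevTree).
```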
- filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) -> {FilteredAcc, RemovedKeysAcc}; -filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) -> +filter_leafs([{Pos, [{LeafKey, _} | _]} = Path | Rest], Keys, FilteredAcc, RemovedKeysAcc) -> FilteredKeys = lists:delete({Pos, LeafKey}, Keys), - if FilteredKeys == Keys -> - % this leaf is not a key we are looking to remove - filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc); - true -> - % this did match a key, remove both the node and the input key - filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc]) + if + FilteredKeys == Keys -> + % this leaf is not a key we are looking to remove + filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc); + true -> + % this did match a key, remove both the node and the input key + filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc]) end. % Removes any branches from the tree whose leaf node(s) are in the Keys @@ -255,15 +258,18 @@ remove_leafs(Trees, Keys) -> % convert paths back to trees NewTree = lists:foldl( - fun({StartPos, Path},TreeAcc) -> + fun({StartPos, Path}, TreeAcc) -> [SingleTree] = lists:foldl( - fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path), + fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path + ), {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}), NewTrees - end, [], SortedPaths), + end, + [], + SortedPaths + ), {NewTree, RemovedKeys}. - % get the leafs in the tree matching the keys. The matching key nodes can be % leafs or an inner nodes. If an inner node, then the leafs for that node % are returned. @@ -274,7 +280,7 @@ get_key_leafs(_, [], Acc) -> {Acc, []}; get_key_leafs([], Keys, Acc) -> {Acc, Keys}; -get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) -> +get_key_leafs([{Pos, Tree} | Rest], Keys, Acc) -> {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []), get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc). @@ -282,7 +288,7 @@ get_key_leafs_simple(_Pos, _Tree, [], _PathAcc) -> {[], []}; get_key_leafs_simple(_Pos, [], Keys, _PathAcc) -> {[], Keys}; -get_key_leafs_simple(Pos, [{Key, _, SubTree}=Tree | RestTree], Keys, PathAcc) -> +get_key_leafs_simple(Pos, [{Key, _, SubTree} = Tree | RestTree], Keys, PathAcc) -> case lists:delete({Pos, Key}, Keys) of Keys -> % Same list, key not found @@ -300,7 +306,6 @@ get_key_leafs_simple(Pos, [{Key, _, SubTree}=Tree | RestTree], Keys, PathAcc) -> {ChildLeafs ++ SiblingLeafs, Keys4} end. - get_key_leafs_simple2(_Pos, [], Keys, _PathAcc) -> % No more tree to deal with so no more keys to return. {[], Keys}; @@ -320,10 +325,12 @@ get_key_leafs_simple2(Pos, [{Key, _Value, SubTree} | RestTree], Keys, PathAcc) - {SiblingLeafs, Keys4} = get_key_leafs_simple2(Pos, RestTree, Keys3, PathAcc), {ChildLeafs ++ SiblingLeafs, Keys4}. - get(Tree, KeysToGet) -> {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet), - FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths], + FixedResults = [ + {Value, {Pos, [Key0 || {Key0, _} <- Path]}} + || {Pos, [{_Key, Value} | _] = Path} <- KeyPaths + ], {FixedResults, KeysNotFound}. 
get_full_key_paths(Tree, Keys) -> @@ -333,11 +340,10 @@ get_full_key_paths(_, [], Acc) -> {Acc, []}; get_full_key_paths([], Keys, Acc) -> {Acc, Keys}; -get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) -> +get_full_key_paths([{Pos, Tree} | Rest], Keys, Acc) -> {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []), get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc). - get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) -> {[], []}; get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) -> @@ -345,13 +351,17 @@ get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) -> get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) -> KeysToGet2 = KeysToGet -- [{Pos, KeyId}], CurrentNodeResult = - case length(KeysToGet2) =:= length(KeysToGet) of - true -> % not in the key list. - []; - false -> % this node is the key list. return it - [{Pos, [{KeyId, Value} | KeyPathAcc]}] - end, - {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]), + case length(KeysToGet2) =:= length(KeysToGet) of + % not in the key list. + true -> + []; + % this node is the key list. return it + false -> + [{Pos, [{KeyId, Value} | KeyPathAcc]}] + end, + {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [ + {KeyId, Value} | KeyPathAcc + ]), {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc), {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}. @@ -368,14 +378,15 @@ get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) -> get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) -> [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)]; get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) -> - get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc). + get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ + get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc). get_all_leafs(Trees) -> get_all_leafs(Trees, []). get_all_leafs([], Acc) -> Acc; -get_all_leafs([{Pos, Tree}|Rest], Acc) -> +get_all_leafs([{Pos, Tree} | Rest], Acc) -> get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc). get_all_leafs_simple(_Pos, [], _KeyPathAcc) -> @@ -383,12 +394,12 @@ get_all_leafs_simple(_Pos, [], _KeyPathAcc) -> get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) -> [{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)]; get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) -> - get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc). - + get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ + get_all_leafs_simple(Pos, RestTree, KeyPathAcc). count_leafs([]) -> 0; -count_leafs([{_Pos,Tree}|Rest]) -> +count_leafs([{_Pos, Tree} | Rest]) -> count_leafs_simple([Tree]) + count_leafs(Rest). count_leafs_simple([]) -> @@ -398,42 +409,49 @@ count_leafs_simple([{_Key, _Value, []} | RestTree]) -> count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) -> count_leafs_simple(SubTree) + count_leafs_simple(RestTree). - fold(_Fun, Acc, []) -> Acc; -fold(Fun, Acc0, [{Pos, Tree}|Rest]) -> +fold(Fun, Acc0, [{Pos, Tree} | Rest]) -> Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]), fold(Fun, Acc1, Rest). 
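Building on that example, fold/3 above visits every node with its {Pos, Key} identifier and a leaf/branch tag, so collecting all leaf revisions is a small fold (a sketch, using the RevTree value from the earlier example):

```
LeafRevs = couch_key_tree:fold(
    fun
        (PosKey, _Value, leaf, Acc) -> [PosKey | Acc];
        (_PosKey, _Value, branch, Acc) -> Acc
    end,
    [],
    RevTree
).
%% => [{2, <<"2-c">>}, {2, <<"2-b">>}]
```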
fold_simple(_Fun, Acc, _Pos, []) -> Acc; fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) -> - Type = if SubTree == [] -> leaf; true -> branch end, + Type = + if + SubTree == [] -> leaf; + true -> branch + end, Acc1 = Fun({Pos, Key}, Value, Type, Acc0), - Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree), + Acc2 = fold_simple(Fun, Acc1, Pos + 1, SubTree), fold_simple(Fun, Acc2, Pos, RestTree). - map(_Fun, []) -> []; -map(Fun, [{Pos, Tree}|Rest]) -> +map(Fun, [{Pos, Tree} | Rest]) -> case erlang:fun_info(Fun, arity) of - {arity, 2} -> - [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]), - [{Pos, NewTree} | map(Fun, Rest)]; - {arity, 3} -> - [NewTree] = map_simple(Fun, Pos, [Tree]), - [{Pos, NewTree} | map(Fun, Rest)] + {arity, 2} -> + [NewTree] = map_simple(fun(A, B, _C) -> Fun(A, B) end, Pos, [Tree]), + [{Pos, NewTree} | map(Fun, Rest)]; + {arity, 3} -> + [NewTree] = map_simple(Fun, Pos, [Tree]), + [{Pos, NewTree} | map(Fun, Rest)] end. map_simple(_Fun, _Pos, []) -> []; map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) -> - Value2 = Fun({Pos, Key}, Value, - if SubTree == [] -> leaf; true -> branch end), + Value2 = Fun( + {Pos, Key}, + Value, + if + SubTree == [] -> leaf; + true -> branch + end + ), [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)]. - mapfold(_Fun, Acc, []) -> {[], Acc}; mapfold(Fun, Acc, [{Pos, Tree} | Rest]) -> @@ -444,16 +462,22 @@ mapfold(Fun, Acc, [{Pos, Tree} | Rest]) -> mapfold_simple(_Fun, Acc, _Pos, []) -> {[], Acc}; mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) -> - {Value2, Acc2} = Fun({Pos, Key}, Value, - if SubTree == [] -> leaf; true -> branch end, Acc), + {Value2, Acc2} = Fun( + {Pos, Key}, + Value, + if + SubTree == [] -> leaf; + true -> branch + end, + Acc + ), {SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree), {RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree), {[{Key, Value2, SubTree2} | RestTree2], Acc4}. - map_leafs(_Fun, []) -> []; -map_leafs(Fun, [{Pos, Tree}|Rest]) -> +map_leafs(Fun, [{Pos, Tree} | Rest]) -> [NewTree] = map_leafs_simple(Fun, Pos, [Tree]), [{Pos, NewTree} | map_leafs(Fun, Rest)]. @@ -465,19 +489,22 @@ map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) -> map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) -> [{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)]. - stem(Trees, Limit) -> try - {_, Branches} = lists:foldl(fun(Tree, {Seen, TreeAcc}) -> - {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen), - {NewSeen, NewBranches ++ TreeAcc} - end, {sets:new(), []}, Trees), + {_, Branches} = lists:foldl( + fun(Tree, {Seen, TreeAcc}) -> + {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen), + {NewSeen, NewBranches ++ TreeAcc} + end, + {sets:new(), []}, + Trees + ), lists:sort(Branches) - catch throw:dupe_keys -> - repair_tree(Trees, Limit) + catch + throw:dupe_keys -> + repair_tree(Trees, Limit) end. - stem_tree({Depth, Child}, Limit, Seen) -> case stem_tree(Depth, Child, Limit, Seen) of {NewSeen, _, NewChild, NewBranches} -> @@ -486,41 +513,45 @@ stem_tree({Depth, Child}, Limit, Seen) -> {NewSeen, NewBranches} end. 
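A small worked example of stem/2, defined above: stemming a linear path "1-a" -> "2-b" -> "3-c" to a limit of 2 drops the oldest revision and re-roots the branch at position 2 (placeholder values again):

```
[{2, {<<"2-b">>, x, [{<<"3-c">>, x, []}]}}] =
    couch_key_tree:stem(
        [{1, {<<"1-a">>, x, [{<<"2-b">>, x, [{<<"3-c">>, x, []}]}]}}],
        2
    ).
```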
- stem_tree(_Depth, {Key, _Val, []} = Leaf, Limit, Seen) -> {check_key(Key, Seen), Limit - 1, Leaf, []}; - stem_tree(Depth, {Key, Val, Children}, Limit, Seen0) -> Seen1 = check_key(Key, Seen0), - FinalAcc = lists:foldl(fun(Child, Acc) -> - {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc, - case stem_tree(Depth + 1, Child, Limit, SeenAcc) of - {NewSeenAcc, LimitPos, NewChild, NewBranches} -> - NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc), - NewChildAcc = [NewChild | ChildAcc], - NewBranchAcc = NewBranches ++ BranchAcc, - {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc}; - {NewSeenAcc, LimitPos, NewBranches} -> - NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc), - NewBranchAcc = NewBranches ++ BranchAcc, - {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc} - end - end, {Seen1, -1, [], []}, Children), + FinalAcc = lists:foldl( + fun(Child, Acc) -> + {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc, + case stem_tree(Depth + 1, Child, Limit, SeenAcc) of + {NewSeenAcc, LimitPos, NewChild, NewBranches} -> + NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc), + NewChildAcc = [NewChild | ChildAcc], + NewBranchAcc = NewBranches ++ BranchAcc, + {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc}; + {NewSeenAcc, LimitPos, NewBranches} -> + NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc), + NewBranchAcc = NewBranches ++ BranchAcc, + {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc} + end + end, + {Seen1, -1, [], []}, + Children + ), {FinalSeen, FinalLimitPos, FinalChildren, FinalBranches} = FinalAcc, case FinalLimitPos of N when N > 0, length(FinalChildren) > 0 -> FinalNode = {Key, Val, lists:reverse(FinalChildren)}, {FinalSeen, FinalLimitPos - 1, FinalNode, FinalBranches}; 0 when length(FinalChildren) > 0 -> - NewBranches = lists:map(fun(Child) -> - {Depth + 1, Child} - end, lists:reverse(FinalChildren)), + NewBranches = lists:map( + fun(Child) -> + {Depth + 1, Child} + end, + lists:reverse(FinalChildren) + ), {FinalSeen, -1, NewBranches ++ FinalBranches}; N when N < 0, length(FinalChildren) == 0 -> {FinalSeen, FinalLimitPos - 1, FinalBranches} end. - check_key(Key, Seen) -> case sets:is_element(Key, Seen) of true -> @@ -529,29 +560,40 @@ check_key(Key, Seen) -> sets:add_element(Key, Seen) end. - repair_tree(Trees, Limit) -> % flatten each branch in a tree into a tree path, sort by starting rev # - Paths = lists:sort(lists:map(fun({Pos, Path}) -> - StemmedPath = lists:sublist(Path, Limit), - {Pos + 1 - length(StemmedPath), StemmedPath} - end, get_all_leafs_full(Trees))), + Paths = lists:sort( + lists:map( + fun({Pos, Path}) -> + StemmedPath = lists:sublist(Path, Limit), + {Pos + 1 - length(StemmedPath), StemmedPath} + end, + get_all_leafs_full(Trees) + ) + ), % convert paths back to trees lists:foldl( - fun({StartPos, Path},TreeAcc) -> + fun({StartPos, Path}, TreeAcc) -> [SingleTree] = lists:foldl( - fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path), + fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path + ), {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}), NewTrees - end, [], Paths). - - -value_pref(Tuple, _) when is_tuple(Tuple), - (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) -> + end, + [], + Paths + ). 
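The fold used by repair_tree/2 above (and earlier by remove_leafs/2) rebuilds a leaf-first path into a single nested branch before re-merging it; a standalone sketch:

```
Path = [{<<"2-b">>, x}, {<<"1-a">>, x}],  %% leaf first, root last
[SingleTree] = lists:foldl(
    fun({K, V}, Acc) -> [{K, V, Acc}] end,
    [],
    Path
),
{<<"1-a">>, x, [{<<"2-b">>, x, []}]} = SingleTree.
```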
+ +value_pref(Tuple, _) when + is_tuple(Tuple), + (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) +-> Tuple; -value_pref(_, Tuple) when is_tuple(Tuple), - (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) -> +value_pref(_, Tuple) when + is_tuple(Tuple), + (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) +-> Tuple; value_pref(?REV_MISSING, Other) -> Other; diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl index 618a0144f..1fad20280 100644 --- a/src/couch/src/couch_lru.erl +++ b/src/couch/src/couch_lru.erl @@ -24,14 +24,14 @@ insert(DbName, {Tree0, Dict0}) -> update(DbName, {Tree0, Dict0}) -> case dict:find(DbName, Dict0) of - {ok, Old} -> - New = couch_util:unique_monotonic_integer(), - Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)), - Dict = dict:store(DbName, New, Dict0), - {Tree, Dict}; - error -> - % We closed this database before processing the update. Ignore - {Tree0, Dict0} + {ok, Old} -> + New = couch_util:unique_monotonic_integer(), + Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)), + Dict = dict:store(DbName, New, Dict0), + {Tree, Dict}; + error -> + % We closed this database before processing the update. Ignore + {Tree0, Dict0} end. %% Attempt to close the oldest idle database. @@ -47,21 +47,22 @@ close_int({Lru, DbName, Iter}, {Tree, Dict} = Cache) -> CouchDbsPidToName = couch_server:couch_dbs_pid_to_name(DbName), case ets:update_element(CouchDbs, DbName, {#entry.lock, locked}) of - true -> - [#entry{db = Db, pid = Pid}] = ets:lookup(CouchDbs, DbName), - case couch_db:is_idle(Db) of true -> - true = ets:delete(CouchDbs, DbName), - true = ets:delete(CouchDbsPidToName, Pid), - exit(Pid, kill), - {true, {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)}}; + true -> + [#entry{db = Db, pid = Pid}] = ets:lookup(CouchDbs, DbName), + case couch_db:is_idle(Db) of + true -> + true = ets:delete(CouchDbs, DbName), + true = ets:delete(CouchDbsPidToName, Pid), + exit(Pid, kill), + {true, {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)}}; + false -> + ElemSpec = {#entry.lock, unlocked}, + true = ets:update_element(CouchDbs, DbName, ElemSpec), + couch_stats:increment_counter([couchdb, couch_server, lru_skip]), + close_int(gb_trees:next(Iter), update(DbName, Cache)) + end; false -> - ElemSpec = {#entry.lock, unlocked}, - true = ets:update_element(CouchDbs, DbName, ElemSpec), - couch_stats:increment_counter([couchdb, couch_server, lru_skip]), - close_int(gb_trees:next(Iter), update(DbName, Cache)) - end; - false -> - NewTree = gb_trees:delete(Lru, Tree), - NewIter = gb_trees:iterator(NewTree), - close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)}) -end. + NewTree = gb_trees:delete(Lru, Tree), + NewIter = gb_trees:iterator(NewTree), + close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)}) + end. diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl index e2bbda3e3..adb1b740f 100644 --- a/src/couch/src/couch_multidb_changes.erl +++ b/src/couch/src/couch_multidb_changes.erl @@ -15,27 +15,27 @@ -behaviour(gen_server). -export([ - start_link/4 + start_link/4 ]). -export([ - init/1, - terminate/2, - handle_call/3, - handle_info/2, - handle_cast/2, - code_change/3 + init/1, + terminate/2, + handle_call/3, + handle_info/2, + handle_cast/2, + code_change/3 ]). -export([ - changes_reader/3, - changes_reader_cb/3 + changes_reader/3, + changes_reader_cb/3 ]). -include_lib("couch/include/couch_db.hrl"). -include_lib("mem3/include/mem3.hrl"). 
--define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}). +-define(CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]}}). -define(AVG_DELAY_MSEC, 10). -define(MAX_DELAY_MSEC, 120000). @@ -68,20 +68,18 @@ -callback db_change(DbName :: binary(), Change :: term(), Context :: term()) -> Context :: term(). - % External API - % Opts list can contain: % - `skip_ddocs` : Skip design docs -spec start_link(binary(), module(), term(), list()) -> {ok, pid()} | ignore | {error, term()}. start_link(DbSuffix, Module, Context, Opts) when - is_binary(DbSuffix), is_atom(Module), is_list(Opts) -> + is_binary(DbSuffix), is_atom(Module), is_list(Opts) +-> gen_server:start_link(?MODULE, [DbSuffix, Module, Context, Opts], []). - % gen_server callbacks init([DbSuffix, Module, Context, Opts]) -> @@ -98,21 +96,21 @@ init([DbSuffix, Module, Context, Opts]) -> skip_ddocs = proplists:is_defined(skip_ddocs, Opts) }}. - terminate(_Reason, _State) -> ok. - -handle_call({change, DbName, Change}, _From, - #state{skip_ddocs=SkipDDocs, mod=Mod, ctx=Ctx} = State) -> +handle_call( + {change, DbName, Change}, + _From, + #state{skip_ddocs = SkipDDocs, mod = Mod, ctx = Ctx} = State +) -> case {SkipDDocs, is_design_doc(Change)} of {true, true} -> {reply, ok, State}; {_, _} -> - {reply, ok, State#state{ctx=Mod:db_change(DbName, Change, Ctx)}} + {reply, ok, State#state{ctx = Mod:db_change(DbName, Change, Ctx)}} end; - -handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid=Ets} = State) -> +handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid = Ets} = State) -> case ets:lookup(Ets, DbName) of [] -> true = ets:insert(Ets, {DbName, EndSeq, false}); @@ -121,11 +119,9 @@ handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid=Ets} = State) -> end, {reply, ok, State}. - handle_cast({resume_scan, DbName}, State) -> {noreply, resume_scan(DbName, State)}. - handle_info({'$couch_event', DbName, Event}, #state{suffix = Suf} = State) -> case Suf =:= couch_db:dbname_suffix(DbName) of true -> @@ -133,23 +129,22 @@ handle_info({'$couch_event', DbName, Event}, #state{suffix = Suf} = State) -> _ -> {noreply, State} end; - handle_info({'DOWN', Ref, _, _, Info}, #state{event_server = Ref} = State) -> {stop, {couch_event_server_died, Info}, State}; - handle_info({'EXIT', From, normal}, #state{scanner = From} = State) -> - {noreply, State#state{scanner=nil}}; - + {noreply, State#state{scanner = nil}}; handle_info({'EXIT', From, Reason}, #state{scanner = From} = State) -> {stop, {scanner_died, Reason}, State}; - handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) -> couch_log:debug("~p change feed exited ~p", [State#state.suffix, From]), case lists:keytake(From, 2, Pids) of {value, {DbName, From}, NewPids} -> - if Reason == normal -> ok; true -> - Fmt = "~s : Known change feed ~w died :: ~w", - couch_log:error(Fmt, [?MODULE, From, Reason]) + if + Reason == normal -> + ok; + true -> + Fmt = "~s : Known change feed ~w died :: ~w", + couch_log:error(Fmt, [?MODULE, From, Reason]) end, NewState = State#state{pids = NewPids}, case ets:lookup(State#state.tid, DbName) of @@ -165,15 +160,12 @@ handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) -> couch_log:error(Fmt, [?MODULE, State#state.suffix, From, Reason]), {stop, {unexpected_exit, From, Reason}, State} end; - handle_info(_Msg, State) -> {noreply, State}. - code_change(_OldVsn, State, _Extra) -> {ok, State}. - % Private functions -spec register_with_event_server(pid()) -> reference(). 
@@ -182,7 +174,6 @@ register_with_event_server(Server) -> couch_event:register_all(Server), Ref. - -spec db_callback(created | deleted | updated, binary(), #state{}) -> #state{}. db_callback(created, DbName, #state{mod = Mod, ctx = Ctx} = State) -> NewState = State#state{ctx = Mod:db_created(DbName, Ctx)}, @@ -194,9 +185,8 @@ db_callback(updated, DbName, State) -> db_callback(_Other, _DbName, State) -> State. - -spec resume_scan(binary(), #state{}) -> #state{}. -resume_scan(DbName, #state{pids=Pids, tid=Ets} = State) -> +resume_scan(DbName, #state{pids = Pids, tid = Ets} = State) -> case {lists:keyfind(DbName, 1, Pids), ets:lookup(Ets, DbName)} of {{DbName, _}, []} -> % Found existing change feed, but not entry in ETS @@ -217,20 +207,18 @@ resume_scan(DbName, #state{pids=Pids, tid=Ets} = State) -> Mod = State#state.mod, Ctx = Mod:db_found(DbName, State#state.ctx), Pid = start_changes_reader(DbName, 0), - State#state{ctx=Ctx, pids=[{DbName, Pid} | Pids]}; + State#state{ctx = Ctx, pids = [{DbName, Pid} | Pids]}; {false, [{DbName, EndSeq, _}]} -> % No existing change feed running. Found existing checkpoint. % Start a new change reader from last checkpoint. true = ets:insert(Ets, {DbName, EndSeq, false}), Pid = start_changes_reader(DbName, EndSeq), - State#state{pids=[{DbName, Pid} | Pids]} - end. - + State#state{pids = [{DbName, Pid} | Pids]} + end. start_changes_reader(DbName, Since) -> spawn_link(?MODULE, changes_reader, [self(), DbName, Since]). - changes_reader(Server, DbName, Since) -> {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]), ChangesArgs = #changes_args{ @@ -242,7 +230,6 @@ changes_reader(Server, DbName, Since) -> ChFun = couch_changes:handle_db_changes(ChangesArgs, {json_req, null}, Db), ChFun({fun ?MODULE:changes_reader_cb/3, {Server, DbName}}). - changes_reader_cb({change, Change, _}, _, {Server, DbName}) -> ok = gen_server:call(Server, {change, DbName, Change}, infinity), {Server, DbName}; @@ -252,34 +239,35 @@ changes_reader_cb({stop, EndSeq}, _, {Server, DbName}) -> changes_reader_cb(_, _, Acc) -> Acc. - scan_all_dbs(Server, DbSuffix) when is_pid(Server) -> ok = scan_local_db(Server, DbSuffix), {ok, Db} = mem3_util:ensure_exists( - config:get("mem3", "shards_db", "_dbs")), + config:get("mem3", "shards_db", "_dbs") + ), ChangesFun = couch_changes:handle_db_changes(#changes_args{}, nil, Db), ChangesFun({fun scan_changes_cb/3, {Server, DbSuffix, 1}}), couch_db:close(Db). - scan_changes_cb({change, {Change}, _}, _, {_Server, DbSuffix, _Count} = Acc) -> DbName = couch_util:get_value(<<"id">>, Change), - case DbName of <<"_design/", _/binary>> -> Acc; _Else -> - NameMatch = DbSuffix =:= couch_db:dbname_suffix(DbName), - case {NameMatch, couch_replicator_utils:is_deleted(Change)} of - {false, _} -> - Acc; - {true, true} -> - Acc; - {true, false} -> - Shards = local_shards(DbName), - lists:foldl(fun notify_fold/2, Acc, Shards) - end + case DbName of + <<"_design/", _/binary>> -> + Acc; + _Else -> + NameMatch = DbSuffix =:= couch_db:dbname_suffix(DbName), + case {NameMatch, couch_replicator_utils:is_deleted(Change)} of + {false, _} -> + Acc; + {true, true} -> + Acc; + {true, false} -> + Shards = local_shards(DbName), + lists:foldl(fun notify_fold/2, Acc, Shards) + end end; scan_changes_cb(_, _, Acc) -> Acc. - local_shards(DbName) -> try [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)] @@ -288,7 +276,6 @@ local_shards(DbName) -> [] end. 
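For context, a hypothetical callback module for the behaviour implemented here. The callback names and arities follow how Mod is invoked in db_callback/3 and resume_scan/2 above; my_db_listener is an invented name:

```
-module(my_db_listener).
-behaviour(couch_multidb_changes).
-export([db_created/2, db_deleted/2, db_found/2, db_change/3]).

%% The Context term is threaded through unchanged in this sketch.
db_created(_DbName, Ctx) -> Ctx.
db_deleted(_DbName, Ctx) -> Ctx.
db_found(_DbName, Ctx) -> Ctx.
db_change(DbName, Change, Ctx) ->
    couch_log:info("change in ~s: ~p", [DbName, Change]),
    Ctx.
```

Such a module would be wired up via start_link(DbSuffix, my_db_listener, InitialCtx, [skip_ddocs]), per the API at the top of this module.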
- notify_fold(DbName, {Server, DbSuffix, Count}) -> Jitter = jitter(Count), spawn_link(fun() -> @@ -297,7 +284,6 @@ notify_fold(DbName, {Server, DbSuffix, Count}) -> end), {Server, DbSuffix, Count + 1}. - % Jitter is proportional to the number of shards found so far. This is done to % avoid a stampede and notifying the callback function with potentially a large % number of shards back to back during startup. @@ -305,7 +291,6 @@ jitter(N) -> Range = min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC), couch_rand:uniform(Range). - scan_local_db(Server, DbSuffix) when is_pid(Server) -> case couch_db:open_int(DbSuffix, [?CTX, sys_db, nologifmissing]) of {ok, Db} -> @@ -315,7 +300,6 @@ scan_local_db(Server, DbSuffix) when is_pid(Server) -> ok end. - is_design_doc({Change}) -> case lists:keyfind(<<"id">>, 1, Change) of false -> @@ -324,13 +308,11 @@ is_design_doc({Change}) -> is_design_doc_id(Id) end. - is_design_doc_id(<<?DESIGN_DOC_PREFIX, _/binary>>) -> true; is_design_doc_id(_) -> false. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -380,7 +362,6 @@ couch_multidb_changes_test_() -> } }. - setup_all() -> mock_logs(), mock_callback_mod(), @@ -389,25 +370,31 @@ setup_all() -> meck:expect(mem3_util, ensure_exists, 1, {ok, dbs}), ChangesFun = meck:val(fun(_) -> ok end), meck:expect(couch_changes, handle_db_changes, 3, ChangesFun), - meck:expect(couch_db, open_int, - fun(?DBNAME, [?CTX, sys_db]) -> {ok, db}; + meck:expect( + couch_db, + open_int, + fun + (?DBNAME, [?CTX, sys_db]) -> {ok, db}; (_, _) -> {not_found, no_db_file} - end), + end + ), meck:expect(couch_db, close, 1, ok), mock_changes_reader(), % create process to stand in for couch_event_server % mocking erlang:monitor doesn't work, so give it real process to monitor - EvtPid = spawn_link(fun() -> receive looper -> ok end end), + EvtPid = spawn_link(fun() -> + receive + looper -> ok + end + end), true = register(couch_event_server, EvtPid), EvtPid. - teardown_all(EvtPid) -> unlink(EvtPid), exit(EvtPid, kill), meck:unload(). - setup() -> meck:reset([ ?MOD, @@ -417,11 +404,9 @@ setup() -> couch_log ]). - teardown(_) -> ok. - t_handle_call_change() -> ?_test(begin State = mock_state(), @@ -431,7 +416,6 @@ t_handle_call_change() -> ?assert(meck:called(?MOD, db_change, [?DBNAME, Change, zig])) end). - t_handle_call_change_filter_design_docs() -> ?_test(begin State0 = mock_state(), @@ -442,7 +426,6 @@ t_handle_call_change_filter_design_docs() -> ?assertNot(meck:called(?MOD, db_change, [?DBNAME, Change, zig])) end). - t_handle_call_checkpoint_new() -> ?_test(begin Tid = mock_ets(), @@ -452,7 +435,6 @@ t_handle_call_checkpoint_new() -> ets:delete(Tid) end). - t_handle_call_checkpoint_existing() -> ?_test(begin Tid = mock_ets(), @@ -463,7 +445,6 @@ t_handle_call_checkpoint_existing() -> ets:delete(Tid) end). - t_handle_info_created() -> ?_test(begin Tid = mock_ets(), @@ -473,18 +454,16 @@ t_handle_info_created() -> ?assert(meck:called(?MOD, db_created, [?DBNAME, zig])) end). - t_handle_info_deleted() -> - ?_test(begin + ?_test(begin State = mock_state(), handle_info_check({'$couch_event', ?DBNAME, deleted}, State), ?assert(meck:validate(?MOD)), ?assert(meck:called(?MOD, db_deleted, [?DBNAME, zig])) end). - t_handle_info_updated() -> - ?_test(begin + ?_test(begin Tid = mock_ets(), State = mock_state(Tid), handle_info_check({'$couch_event', ?DBNAME, updated}, State), @@ -492,9 +471,8 @@ t_handle_info_updated() -> ?assert(meck:called(?MOD, db_found, [?DBNAME, zig])) end). 
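The stubbing pattern from setup_all/0 above generalizes well: a multi-clause fun makes only the expected argument shape succeed, with everything else falling through to an error tuple. A sketch (the passthrough option and the db name are assumptions, not from the diff):

```
meck:new(couch_db, [passthrough]),
meck:expect(couch_db, open_int, fun
    (<<"mydb">>, _Opts) -> {ok, db};
    (_, _) -> {not_found, no_db_file}
end).
```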
- t_handle_info_other_event() -> - ?_test(begin + ?_test(begin State = mock_state(), handle_info_check({'$couch_event', ?DBNAME, somethingelse}, State), ?assertNot(meck:called(?MOD, db_created, [?DBNAME, somethingelse])), @@ -502,15 +480,13 @@ t_handle_info_other_event() -> ?assertNot(meck:called(?MOD, db_found, [?DBNAME, somethingelse])) end). - t_handle_info_created_other_db() -> - ?_test(begin + ?_test(begin State = mock_state(), handle_info_check({'$couch_event', <<"otherdb">>, created}, State), ?assertNot(meck:called(?MOD, db_created, [?DBNAME, zig])) end). - t_handle_info_scanner_exit_normal() -> ?_test(begin Res = handle_info({'EXIT', spid, normal}, mock_state()), @@ -519,32 +495,28 @@ t_handle_info_scanner_exit_normal() -> ?assertEqual(nil, RState#state.scanner) end). - t_handle_info_scanner_crashed() -> ?_test(begin Res = handle_info({'EXIT', spid, oops}, mock_state()), ?assertMatch({stop, {scanner_died, oops}, _State}, Res) end). - t_handle_info_event_server_exited() -> ?_test(begin Res = handle_info({'DOWN', esref, type, espid, reason}, mock_state()), ?assertMatch({stop, {couch_event_server_died, reason}, _}, Res) end). - t_handle_info_unknown_pid_exited() -> ?_test(begin State0 = mock_state(), - Res0 = handle_info({'EXIT', somepid, normal}, State0), + Res0 = handle_info({'EXIT', somepid, normal}, State0), ?assertMatch({noreply, State0}, Res0), State1 = mock_state(), Res1 = handle_info({'EXIT', somepid, oops}, State1), ?assertMatch({stop, {unexpected_exit, somepid, oops}, State1}, Res1) end). - t_handle_info_change_feed_exited() -> ?_test(begin Tid0 = mock_ets(), @@ -563,7 +535,6 @@ t_handle_info_change_feed_exited() -> ets:delete(Tid1) end). - t_handle_info_change_feed_exited_and_need_rescan() -> ?_test(begin Tid = mock_ets(), @@ -582,7 +553,6 @@ t_handle_info_change_feed_exited_and_need_rescan() -> ets:delete(Tid) end). - t_spawn_changes_reader() -> ?_test(begin Pid = start_changes_reader(?DBNAME, 3), @@ -592,16 +562,20 @@ t_spawn_changes_reader() -> ?assert(meck:validate(couch_db)), ?assert(meck:validate(couch_changes)), ?assert(meck:called(couch_db, open_int, [?DBNAME, [?CTX, sys_db]])), - ?assert(meck:called(couch_changes, handle_db_changes, [ - #changes_args{ - include_docs = true, - since = 3, - feed = "normal", - timeout = infinity - }, {json_req, null}, db])) + ?assert( + meck:called(couch_changes, handle_db_changes, [ + #changes_args{ + include_docs = true, + since = 3, + feed = "normal", + timeout = infinity + }, + {json_req, null}, + db + ]) + ) end). - t_changes_reader_cb_change() -> ?_test(begin {ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []), @@ -613,7 +587,6 @@ t_changes_reader_cb_change() -> exit(Pid, kill) end). - t_changes_reader_cb_stop() -> ?_test(begin {ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []), @@ -626,11 +599,9 @@ t_changes_reader_cb_stop() -> exit(Pid, kill) end). - t_changes_reader_cb_other() -> ?_assertEqual(acc, changes_reader_cb(other, chtype, acc)). 
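All of the t_* cases in this block follow one meck recipe: install expectations on the callback module, invoke the gen_server callback directly, then assert with meck:called/3 and meck:validate/1. A minimal self-contained sketch of that recipe; the mocked module name is hypothetical and need not exist, hence the non_strict option:

```
-module(meck_recipe_tests).
-include_lib("eunit/include/eunit.hrl").

meck_recipe_test() ->
    meck:new(my_mod, [non_strict]),
    meck:expect(my_mod, db_change, fun(_DbName, _Change, Ctx) -> Ctx end),
    %% The code under test would make this call; we make it directly here.
    zig = my_mod:db_change(<<"dbname">>, change, zig),
    ?assert(meck:validate(my_mod)),
    ?assert(meck:called(my_mod, db_change, [<<"dbname">>, change, zig])),
    meck:unload(my_mod).
```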
- t_handle_call_resume_scan_no_chfeed_no_ets_entry() -> ?_test(begin Tid = mock_ets(), @@ -644,17 +615,21 @@ t_handle_call_resume_scan_no_chfeed_no_ets_entry() -> [{?DBNAME, Pid}] = RState#state.pids, ChArgs = kill_mock_changes_reader_and_get_its_args(Pid), ?assertEqual({self(), ?DBNAME}, ChArgs), - ?assert(meck:called(couch_changes, handle_db_changes, [ - #changes_args{ - include_docs = true, - since = 0, - feed = "normal", - timeout = infinity - }, {json_req, null}, db])), + ?assert( + meck:called(couch_changes, handle_db_changes, [ + #changes_args{ + include_docs = true, + since = 0, + feed = "normal", + timeout = infinity + }, + {json_req, null}, + db + ]) + ), ets:delete(Tid) end). - t_handle_call_resume_scan_chfeed_no_ets_entry() -> ?_test(begin Tid = mock_ets(), @@ -667,7 +642,6 @@ t_handle_call_resume_scan_chfeed_no_ets_entry() -> kill_mock_changes_reader_and_get_its_args(Pid) end). - t_handle_call_resume_scan_chfeed_ets_entry() -> ?_test(begin Tid = mock_ets(), @@ -681,7 +655,6 @@ t_handle_call_resume_scan_chfeed_ets_entry() -> kill_mock_changes_reader_and_get_its_args(Pid) end). - t_handle_call_resume_scan_no_chfeed_ets_entry() -> ?_test(begin Tid = mock_ets(), @@ -694,92 +667,96 @@ t_handle_call_resume_scan_no_chfeed_ets_entry() -> [{?DBNAME, Pid}] = RState#state.pids, ChArgs = kill_mock_changes_reader_and_get_its_args(Pid), ?assertEqual({self(), ?DBNAME}, ChArgs), - ?assert(meck:called(couch_changes, handle_db_changes, [ - #changes_args{ - include_docs = true, - since = 1, - feed = "normal", - timeout = infinity - }, {json_req, null}, db])), + ?assert( + meck:called(couch_changes, handle_db_changes, [ + #changes_args{ + include_docs = true, + since = 1, + feed = "normal", + timeout = infinity + }, + {json_req, null}, + db + ]) + ), ets:delete(Tid) end). - t_start_link() -> ?_test(begin {ok, Pid} = start_link(?SUFFIX, ?MOD, nil, []), ?assert(is_pid(Pid)), - ?assertMatch(#state{ - mod = ?MOD, - suffix = ?SUFFIX, - ctx = nil, - pids = [], - skip_ddocs = false - }, sys:get_state(Pid)), + ?assertMatch( + #state{ + mod = ?MOD, + suffix = ?SUFFIX, + ctx = nil, + pids = [], + skip_ddocs = false + }, + sys:get_state(Pid) + ), unlink(Pid), exit(Pid, kill), ?assert(meck:called(couch_event, register_all, [Pid])) end). - t_start_link_no_ddocs() -> ?_test(begin {ok, Pid} = start_link(?SUFFIX, ?MOD, nil, [skip_ddocs]), ?assert(is_pid(Pid)), - ?assertMatch(#state{ - mod = ?MOD, - suffix = ?SUFFIX, - ctx = nil, - pids = [], - skip_ddocs = true - }, sys:get_state(Pid)), + ?assertMatch( + #state{ + mod = ?MOD, + suffix = ?SUFFIX, + ctx = nil, + pids = [], + skip_ddocs = true + }, + sys:get_state(Pid) + ), unlink(Pid), exit(Pid, kill) end). - t_misc_gen_server_callbacks() -> ?_test(begin ?assertEqual(ok, terminate(reason, state)), ?assertEqual({ok, state}, code_change(old, state, extra)) end). - scan_dbs_test_() -> -{ - setup, - fun() -> - Ctx = test_util:start_couch([mem3, fabric]), - GlobalDb = ?tempdb(), - ok = fabric:create_db(GlobalDb, [?CTX]), - #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)), - {Ctx, GlobalDb, LocalDb} - end, - fun({Ctx, GlobalDb, _LocalDb}) -> - fabric:delete_db(GlobalDb, [?CTX]), - test_util:stop_couch(Ctx) - end, - {with, [ - fun t_find_shard/1, - fun t_shard_not_found/1, - fun t_pass_local/1, - fun t_fail_local/1 - ]} -}. 
- + { + setup, + fun() -> + Ctx = test_util:start_couch([mem3, fabric]), + GlobalDb = ?tempdb(), + ok = fabric:create_db(GlobalDb, [?CTX]), + #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)), + {Ctx, GlobalDb, LocalDb} + end, + fun({Ctx, GlobalDb, _LocalDb}) -> + fabric:delete_db(GlobalDb, [?CTX]), + test_util:stop_couch(Ctx) + end, + {with, [ + fun t_find_shard/1, + fun t_shard_not_found/1, + fun t_pass_local/1, + fun t_fail_local/1 + ]} + }. t_find_shard({_, DbName, _}) -> ?_test(begin ?assertEqual(2, length(local_shards(DbName))) end). - t_shard_not_found(_) -> ?_test(begin ?assertEqual([], local_shards(?tempdb())) end). - t_pass_local({_, _, LocalDb}) -> ?_test(begin scan_local_db(self(), LocalDb), @@ -787,11 +764,10 @@ t_pass_local({_, _, LocalDb}) -> {'$gen_cast', Msg} -> ?assertEqual(Msg, {resume_scan, LocalDb}) after 0 -> - ?assert(false) + ?assert(false) end end). - t_fail_local({_, _, LocalDb}) -> ?_test(begin scan_local_db(self(), <<"some_other_db">>), @@ -799,11 +775,10 @@ t_fail_local({_, _, LocalDb}) -> {'$gen_cast', Msg} -> ?assertNotEqual(Msg, {resume_scan, LocalDb}) after 0 -> - ?assert(true) + ?assert(true) end end). - % Test helper functions mock_logs() -> @@ -812,7 +787,6 @@ mock_logs() -> meck:expect(couch_log, info, 2, ok), meck:expect(couch_log, debug, 2, ok). - mock_callback_mod() -> meck:new(?MOD, [non_strict]), meck:expect(?MOD, db_created, fun(_DbName, Ctx) -> Ctx end), @@ -820,7 +794,6 @@ mock_callback_mod() -> meck:expect(?MOD, db_found, fun(_DbName, Ctx) -> Ctx end), meck:expect(?MOD, db_change, fun(_DbName, _Change, Ctx) -> Ctx end). - mock_changes_reader_loop({_CbFun, {Server, DbName}}) -> receive die -> @@ -834,23 +807,23 @@ kill_mock_changes_reader_and_get_its_args(Pid) -> receive {'DOWN', Ref, _, Pid, {Server, DbName}} -> {Server, DbName} - after 1000 -> - erlang:error(spawn_change_reader_timeout) + after 1000 -> + erlang:error(spawn_change_reader_timeout) end. - mock_changes_reader() -> - meck:expect(couch_changes, handle_db_changes, + meck:expect( + couch_changes, + handle_db_changes, fun (_ChArgs, _Req, db) -> fun mock_changes_reader_loop/1; (_ChArgs, _Req, dbs) -> fun(_) -> ok end - end). - + end + ). mock_ets() -> ets:new(multidb_test_ets, [set, public]). - mock_state() -> #state{ mod = ?MOD, @@ -858,19 +831,17 @@ mock_state() -> suffix = ?SUFFIX, event_server = esref, scanner = spid, - pids = []}. - + pids = [] + }. mock_state(Ets) -> State = mock_state(), State#state{tid = Ets}. - mock_state(Ets, Pid) -> State = mock_state(Ets), State#state{pids = [{?DBNAME, Pid}]}. - change_row(Id) when is_binary(Id) -> {[ {<<"seq">>, 1}, @@ -879,13 +850,10 @@ change_row(Id) when is_binary(Id) -> {doc, {[{<<"_id">>, Id}, {<<"_rev">>, <<"1-f00">>}]}} ]}. - handle_call_ok(Msg, State) -> ?assertMatch({reply, ok, _}, handle_call(Msg, from, State)). - handle_info_check(Msg, State) -> ?assertMatch({noreply, _}, handle_info(Msg, State)). - -endif. diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl index eee8b2860..feea00c3a 100644 --- a/src/couch/src/couch_native_process.erl +++ b/src/couch/src/couch_native_process.erl @@ -41,8 +41,15 @@ -behaviour(gen_server). -vsn(1). --export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3, - handle_info/2]). +-export([ + start_link/0, + init/1, + terminate/2, + handle_call/3, + handle_cast/2, + code_change/3, + handle_info/2 +]). -export([set_timeout/2, prompt/2]). -define(STATE, native_proc_state). 
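Stepping back to scan_dbs_test_/0 above: it uses eunit's {setup, Setup, Teardown, {with, [TestFun, ...]}} fixture, where whatever Setup returns is handed to every test fun and finally to Teardown. The same shape in miniature, using an ETS table as the shared resource:

```
-module(fixture_shape_tests).
-include_lib("eunit/include/eunit.hrl").

%% Setup's return value flows into every test fun and into Teardown.
fixture_shape_test_() ->
    {setup,
        fun() -> ets:new(demo_tab, [set, public]) end,
        fun(Tab) -> ets:delete(Tab) end,
        {with, [
            fun(Tab) -> ?assert(ets:insert(Tab, {k, 1})) end,
            fun(Tab) -> ?assertEqual([{k, 1}], ets:lookup(Tab, k)) end
        ]}}.
```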
@@ -74,15 +81,15 @@ prompt(Pid, Data) when is_list(Data) -> init([]) -> V = config:get("query_server_config", "os_process_idle_limit", "300"), Idle = list_to_integer(V) * 1000, - {ok, #evstate{ddocs=dict:new(), idle=Idle}, Idle}. + {ok, #evstate{ddocs = dict:new(), idle = Idle}, Idle}. handle_call({set_timeout, TimeOut}, _From, State) -> - {reply, ok, State#evstate{timeout=TimeOut}, State#evstate.idle}; - + {reply, ok, State#evstate{timeout = TimeOut}, State#evstate.idle}; handle_call({prompt, Data}, _From, State) -> - couch_log:debug("Prompt native qs: ~s",[?JSON_ENCODE(Data)]), - {NewState, Resp} = try run(State, to_binary(Data)) of - {S, R} -> {S, R} + couch_log:debug("Prompt native qs: ~s", [?JSON_ENCODE(Data)]), + {NewState, Resp} = + try run(State, to_binary(Data)) of + {S, R} -> {S, R} catch throw:{error, Why} -> {State, [<<"error">>, Why, Why]} @@ -118,14 +125,14 @@ handle_info(timeout, State) -> gen_server:cast(couch_proc_manager, {os_proc_idle, self()}), erlang:garbage_collect(), {noreply, State, State#evstate.idle}; -handle_info({'EXIT',_,normal}, State) -> +handle_info({'EXIT', _, normal}, State) -> {noreply, State, State#evstate.idle}; -handle_info({'EXIT',_,Reason}, State) -> +handle_info({'EXIT', _, Reason}, State) -> {stop, Reason, State}. terminate(_Reason, _State) -> ok. code_change(_OldVersion, State, _Extra) -> {ok, State}. -run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) -> +run(#evstate{list_pid = Pid} = State, [<<"list_row">>, Row]) when is_pid(Pid) -> Pid ! {self(), list_row, Row}, receive {Pid, chunks, Data} -> @@ -137,124 +144,137 @@ run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) -> throw({timeout, list_cleanup}) end, process_flag(trap_exit, erlang:get(do_trap)), - {State#evstate{list_pid=nil}, [<<"end">>, Data]} + {State#evstate{list_pid = nil}, [<<"end">>, Data]} after State#evstate.timeout -> throw({timeout, list_row}) end; -run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) -> +run(#evstate{list_pid = Pid} = State, [<<"list_end">>]) when is_pid(Pid) -> Pid ! 
{self(), list_end}, Resp = - receive - {Pid, list_end, Data} -> - receive - {'EXIT', Pid, normal} -> ok - after State#evstate.timeout -> - throw({timeout, list_cleanup}) - end, - [<<"end">>, Data] - after State#evstate.timeout -> - throw({timeout, list_end}) - end, + receive + {Pid, list_end, Data} -> + receive + {'EXIT', Pid, normal} -> ok + after State#evstate.timeout -> + throw({timeout, list_cleanup}) + end, + [<<"end">>, Data] + after State#evstate.timeout -> + throw({timeout, list_end}) + end, process_flag(trap_exit, erlang:get(do_trap)), - {State#evstate{list_pid=nil}, Resp}; -run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) -> + {State#evstate{list_pid = nil}, Resp}; +run(#evstate{list_pid = Pid} = State, _Command) when is_pid(Pid) -> {State, [<<"error">>, list_error, list_error]}; -run(#evstate{ddocs=DDocs}, [<<"reset">>]) -> - {#evstate{ddocs=DDocs}, true}; -run(#evstate{ddocs=DDocs, idle=Idle}, [<<"reset">>, QueryConfig]) -> +run(#evstate{ddocs = DDocs}, [<<"reset">>]) -> + {#evstate{ddocs = DDocs}, true}; +run(#evstate{ddocs = DDocs, idle = Idle}, [<<"reset">>, QueryConfig]) -> NewState = #evstate{ ddocs = DDocs, query_config = QueryConfig, idle = Idle }, {NewState, true}; -run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) -> +run(#evstate{funs = Funs} = State, [<<"add_fun">>, BinFunc]) -> FunInfo = makefun(State, BinFunc), - {State#evstate{funs=Funs ++ [FunInfo]}, true}; -run(State, [<<"map_doc">> , Doc]) -> - Resp = lists:map(fun({Sig, Fun}) -> - erlang:put(Sig, []), - Fun(Doc), - lists:reverse(erlang:get(Sig)) - end, State#evstate.funs), + {State#evstate{funs = Funs ++ [FunInfo]}, true}; +run(State, [<<"map_doc">>, Doc]) -> + Resp = lists:map( + fun({Sig, Fun}) -> + erlang:put(Sig, []), + Fun(Doc), + lists:reverse(erlang:get(Sig)) + end, + State#evstate.funs + ), {State, Resp}; run(State, [<<"reduce">>, Funs, KVs]) -> {Keys, Vals} = - lists:foldl(fun([K, V], {KAcc, VAcc}) -> - {[K | KAcc], [V | VAcc]} - end, {[], []}, KVs), + lists:foldl( + fun([K, V], {KAcc, VAcc}) -> + {[K | KAcc], [V | VAcc]} + end, + {[], []}, + KVs + ), Keys2 = lists:reverse(Keys), Vals2 = lists:reverse(Vals), {State, catch reduce(State, Funs, Keys2, Vals2, false)}; run(State, [<<"rereduce">>, Funs, Vals]) -> {State, catch reduce(State, Funs, null, Vals, true)}; -run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) -> +run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) -> DDocs2 = store_ddoc(DDocs, DDocId, DDoc), - {State#evstate{ddocs=DDocs2}, true}; -run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) -> + {State#evstate{ddocs = DDocs2}, true}; +run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, DDocId | Rest]) -> DDoc = load_ddoc(DDocs, DDocId), ddoc(State, DDoc, Rest); run(_, Unknown) -> couch_log:error("Native Process: Unknown command: ~p~n", [Unknown]), throw({error, unknown_command}). 
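The run/2 clauses above are the in-process implementation of the CouchDB query server protocol: reset clears compiled state, add_fun compiles one more map fun, and map_doc applies every registered fun to a document. A hedged sketch of driving it through the exported prompt/2; this assumes a node where the config application is running (init/1 reads query_server_config), and that Emit is among the bindings installed by bindings/3 (the full binding list falls outside this hunk):

```
-module(native_qs_demo).
-export([demo/0]).

demo() ->
    {ok, Pid} = couch_native_process:start_link(),
    true = couch_native_process:prompt(Pid, [<<"reset">>]),
    MapFun = <<
        "fun({Doc}) -> "
        "    Emit(proplists:get_value(<<\"_id\">>, Doc), 1) "
        "end."
    >>,
    true = couch_native_process:prompt(Pid, [<<"add_fun">>, MapFun]),
    Doc = {[{<<"_id">>, <<"a">>}]},
    %% One inner list of [Key, Value] rows per registered map fun,
    %% e.g. something like [[[<<"a">>, 1]]] here.
    couch_native_process:prompt(Pid, [<<"map_doc">>, Doc]).
```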
- + ddoc(State, {DDoc}, [FunPath, Args]) -> % load fun from the FunPath - BFun = lists:foldl(fun - (Key, {Props}) when is_list(Props) -> - couch_util:get_value(Key, Props, nil); - (_Key, Fun) when is_binary(Fun) -> - Fun; - (_Key, nil) -> - throw({error, not_found}); - (_Key, _Fun) -> - throw({error, malformed_ddoc}) - end, {DDoc}, FunPath), + BFun = lists:foldl( + fun + (Key, {Props}) when is_list(Props) -> + couch_util:get_value(Key, Props, nil); + (_Key, Fun) when is_binary(Fun) -> + Fun; + (_Key, nil) -> + throw({error, not_found}); + (_Key, _Fun) -> + throw({error, malformed_ddoc}) + end, + {DDoc}, + FunPath + ), ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args). ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) -> {State, (catch apply(Fun, Args))}; ddoc(State, {_, Fun}, [<<"rewrites">>], Args) -> {State, (catch apply(Fun, Args))}; -ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) -> +ddoc(State, {_, Fun}, [<<"filters">> | _], [Docs, Req]) -> FilterFunWrapper = fun(Doc) -> case catch Fun(Doc, Req) of - true -> true; - false -> false; - {'EXIT', Error} -> couch_log:error("~p", [Error]) + true -> true; + false -> false; + {'EXIT', Error} -> couch_log:error("~p", [Error]) end end, Resp = lists:map(FilterFunWrapper, Docs), {State, [true, Resp]}; -ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) -> +ddoc(State, {_, Fun}, [<<"views">> | _], [Docs]) -> MapFunWrapper = fun(Doc) -> case catch Fun(Doc) of - undefined -> true; - ok -> false; - false -> false; - [_|_] -> true; - {'EXIT', Error} -> couch_log:error("~p", [Error]) + undefined -> true; + ok -> false; + false -> false; + [_ | _] -> true; + {'EXIT', Error} -> couch_log:error("~p", [Error]) end end, Resp = lists:map(MapFunWrapper, Docs), {State, [true, Resp]}; -ddoc(State, {_, Fun}, [<<"shows">>|_], Args) -> - Resp = case (catch apply(Fun, Args)) of - FunResp when is_list(FunResp) -> - FunResp; - {FunResp} -> - [<<"resp">>, {FunResp}]; - FunResp -> - FunResp - end, +ddoc(State, {_, Fun}, [<<"shows">> | _], Args) -> + Resp = + case (catch apply(Fun, Args)) of + FunResp when is_list(FunResp) -> + FunResp; + {FunResp} -> + [<<"resp">>, {FunResp}]; + FunResp -> + FunResp + end, {State, Resp}; -ddoc(State, {_, Fun}, [<<"updates">>|_], Args) -> - Resp = case (catch apply(Fun, Args)) of - [JsonDoc, JsonResp] -> - [<<"up">>, JsonDoc, JsonResp] - end, +ddoc(State, {_, Fun}, [<<"updates">> | _], Args) -> + Resp = + case (catch apply(Fun, Args)) of + [JsonDoc, JsonResp] -> + [<<"up">>, JsonDoc, JsonResp] + end, {State, Resp}; -ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) -> +ddoc(State, {Sig, Fun}, [<<"lists">> | _], Args) -> Self = self(), SpawnFun = fun() -> LastChunk = (catch apply(Fun, Args)), @@ -270,22 +290,22 @@ ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) -> ok end, LastChunks = - case erlang:get(Sig) of - undefined -> [LastChunk]; - OtherChunks -> [LastChunk | OtherChunks] - end, + case erlang:get(Sig) of + undefined -> [LastChunk]; + OtherChunks -> [LastChunk | OtherChunks] + end, Self ! {self(), list_end, lists:reverse(LastChunks)} end, erlang:put(do_trap, process_flag(trap_exit, true)), Pid = spawn_link(SpawnFun), Resp = - receive - {Pid, start, Chunks, JsonResp} -> - [<<"start">>, Chunks, JsonResp] - after State#evstate.timeout -> - throw({timeout, list_start}) - end, - {State#evstate{list_pid=Pid}, Resp}. + receive + {Pid, start, Chunks, JsonResp} -> + [<<"start">>, Chunks, JsonResp] + after State#evstate.timeout -> + throw({timeout, list_start}) + end, + {State#evstate{list_pid = Pid}, Resp}. 
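The lists clause above coordinates a spawned list fun with its parent through three message shapes: {Pid, start, Chunks, Headers} once, {Pid, chunks, Data} per row, and {Pid, list_end, Data} at the end, with timeouts guarding each receive. A toy version of that exchange, stripped of the trap_exit and timeout handling:

```
-module(list_protocol_sketch).
-export([demo/0]).

demo() ->
    Self = self(),
    Pid = spawn_link(fun() -> worker(Self) end),
    Headers = receive {Pid, start, [], H} -> H end,
    Pid ! {self(), list_row, row1},
    Chunk = receive {Pid, chunks, C} -> C end,
    Pid ! {self(), list_end},
    Tail = receive {Pid, list_end, T} -> T end,
    {Headers, Chunk, Tail}.

worker(Parent) ->
    Parent ! {self(), start, [], {[{<<"headers">>, {[]}}]}},
    worker_loop(Parent).

worker_loop(Parent) ->
    receive
        {From, list_row, Row} ->
            From ! {self(), chunks, [Row]},
            worker_loop(Parent);
        {From, list_end} ->
            From ! {self(), list_end, [<<"the end">>]}
    end.
```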
store_ddoc(DDocs, DDocId, DDoc) -> dict:store(DDocId, DDoc, DDocs). @@ -293,7 +313,11 @@ load_ddoc(DDocs, DDocId) -> try dict:fetch(DDocId, DDocs) of {DDoc} -> {DDoc} catch - _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))}) + _:_Else -> + throw( + {error, + ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s", [DDocId]))} + ) end. bindings(State, Sig) -> @@ -316,10 +340,10 @@ bindings(State, Sig, DDoc) -> Send = fun(Chunk) -> Curr = - case erlang:get(Sig) of - undefined -> []; - Else -> Else - end, + case erlang:get(Sig) of + undefined -> []; + Else -> Else + end, erlang:put(Sig, [Chunk | Curr]) end, @@ -329,10 +353,10 @@ bindings(State, Sig, DDoc) -> ok; _ -> Chunks = - case erlang:get(Sig) of - undefined -> []; - CurrChunks -> CurrChunks - end, + case erlang:get(Sig) of + undefined -> []; + CurrChunks -> CurrChunks + end, Self ! {self(), chunks, lists:reverse(Chunks)} end, erlang:put(Sig, []), @@ -343,7 +367,7 @@ bindings(State, Sig, DDoc) -> throw({timeout, list_pid_getrow}) end end, - + FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end, Bindings = [ @@ -357,7 +381,8 @@ bindings(State, Sig, DDoc) -> case DDoc of {_Props} -> Bindings ++ [{'DDoc', DDoc}]; - _Else -> Bindings + _Else -> + Bindings end. % thanks to erlview, via: @@ -373,30 +398,41 @@ makefun(State, Source, {DDoc}) -> makefun(_State, Source, BindFuns) when is_list(BindFuns) -> FunStr = binary_to_list(Source), {ok, Tokens, _} = erl_scan:string(FunStr), - Form = case (catch erl_parse:parse_exprs(Tokens)) of - {ok, [ParsedForm]} -> - ParsedForm; - {error, {LineNum, _Mod, [Mesg, Params]}}=Error -> - couch_log:error("Syntax error on line: ~p~n~s~p~n", - [LineNum, Mesg, Params]), - throw(Error) - end, - Bindings = lists:foldl(fun({Name, Fun}, Acc) -> - erl_eval:add_binding(Name, Fun, Acc) - end, erl_eval:new_bindings(), BindFuns), + Form = + case (catch erl_parse:parse_exprs(Tokens)) of + {ok, [ParsedForm]} -> + ParsedForm; + {error, {LineNum, _Mod, [Mesg, Params]}} = Error -> + couch_log:error( + "Syntax error on line: ~p~n~s~p~n", + [LineNum, Mesg, Params] + ), + throw(Error) + end, + Bindings = lists:foldl( + fun({Name, Fun}, Acc) -> + erl_eval:add_binding(Name, Fun, Acc) + end, + erl_eval:new_bindings(), + BindFuns + ), {value, Fun, _} = erl_eval:expr(Form, Bindings), Fun. reduce(State, BinFuns, Keys, Vals, ReReduce) -> - Funs = case is_list(BinFuns) of - true -> - lists:map(fun(BF) -> makefun(State, BF) end, BinFuns); - _ -> - [makefun(State, BinFuns)] - end, - Reds = lists:map(fun({_Sig, Fun}) -> - Fun(Keys, Vals, ReReduce) - end, Funs), + Funs = + case is_list(BinFuns) of + true -> + lists:map(fun(BF) -> makefun(State, BF) end, BinFuns); + _ -> + [makefun(State, BinFuns)] + end, + Reds = lists:map( + fun({_Sig, Fun}) -> + Fun(Keys, Vals, ReReduce) + end, + Funs + ), [true, Reds]. foldrows(GetRow, ProcRow, Acc) -> @@ -416,15 +452,15 @@ start_list_resp(Self, Sig) -> case erlang:get(list_started) of undefined -> Headers = - case erlang:get(list_headers) of - undefined -> {[{<<"headers">>, {[]}}]}; - CurrHdrs -> CurrHdrs - end, + case erlang:get(list_headers) of + undefined -> {[{<<"headers">>, {[]}}]}; + CurrHdrs -> CurrHdrs + end, Chunks = - case erlang:get(Sig) of - undefined -> []; - CurrChunks -> CurrChunks - end, + case erlang:get(Sig) of + undefined -> []; + CurrChunks -> CurrChunks + end, Self ! 
{self(), start, lists:reverse(Chunks), Headers}, erlang:put(list_started, true), erlang:put(Sig, []), diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl index 63a241433..da5df5134 100644 --- a/src/couch/src/couch_os_process.erl +++ b/src/couch/src/couch_os_process.erl @@ -23,14 +23,14 @@ -define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]). --record(os_proc, - {command, - port, - writer, - reader, - timeout=5000, - idle - }). +-record(os_proc, { + command, + port, + writer, + reader, + timeout = 5000, + idle +}). start_link(Command) -> start_link(Command, []). @@ -55,7 +55,7 @@ prompt(Pid, Data) -> {ok, Result} -> Result; Error -> - couch_log:error("OS Process Error ~p :: ~p",[Pid,Error]), + couch_log:error("OS Process Error ~p :: ~p", [Pid, Error]), throw(Error) end. @@ -72,21 +72,21 @@ readline(#os_proc{} = OsProc) -> Res. readline(#os_proc{port = Port} = OsProc, Acc) -> receive - {Port, {data, {noeol, Data}}} when is_binary(Acc) -> - readline(OsProc, <<Acc/binary,Data/binary>>); - {Port, {data, {noeol, Data}}} when is_binary(Data) -> - readline(OsProc, Data); - {Port, {data, {noeol, Data}}} -> - readline(OsProc, [Data|Acc]); - {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) -> - [<<Acc/binary,Data/binary>>]; - {Port, {data, {eol, Data}}} when is_binary(Data) -> - [Data]; - {Port, {data, {eol, Data}}} -> - lists:reverse(Acc, Data); - {Port, Err} -> - catch port_close(Port), - throw({os_process_error, Err}) + {Port, {data, {noeol, Data}}} when is_binary(Acc) -> + readline(OsProc, <<Acc/binary, Data/binary>>); + {Port, {data, {noeol, Data}}} when is_binary(Data) -> + readline(OsProc, Data); + {Port, {data, {noeol, Data}}} -> + readline(OsProc, [Data | Acc]); + {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) -> + [<<Acc/binary, Data/binary>>]; + {Port, {data, {eol, Data}}} when is_binary(Data) -> + [Data]; + {Port, {data, {eol, Data}}} -> + lists:reverse(Acc, Data); + {Port, Err} -> + catch port_close(Port), + throw({os_process_error, Err}) after OsProc#os_proc.timeout -> catch port_close(Port), throw({os_process_error, "OS process timed out."}) @@ -95,8 +95,10 @@ readline(#os_proc{port = Port} = OsProc, Acc) -> % Standard JSON functions writejson(OsProc, Data) when is_record(OsProc, os_proc) -> JsonData = ?JSON_ENCODE(Data), - couch_log:debug("OS Process ~p Input :: ~s", - [OsProc#os_proc.port, JsonData]), + couch_log:debug( + "OS Process ~p Input :: ~s", + [OsProc#os_proc.port, JsonData] + ), true = writeline(OsProc, JsonData). readjson(OsProc) when is_record(OsProc, os_proc) -> @@ -109,24 +111,28 @@ readjson(OsProc) when is_record(OsProc, os_proc) -> % command, otherwise return the raw JSON line to the caller. pick_command(Line) catch - throw:abort -> - {json, Line}; - throw:{cmd, _Cmd} -> - case ?JSON_DECODE(Line) of - [<<"log">>, Msg] when is_binary(Msg) -> - % we got a message to log. Log it and continue - couch_log:info("OS Process ~p Log :: ~s", - [OsProc#os_proc.port, Msg]), - readjson(OsProc); - [<<"error">>, Id, Reason] -> - throw({error, {couch_util:to_existing_atom(Id),Reason}}); - [<<"fatal">>, Id, Reason] -> - couch_log:info("OS Process ~p Fatal Error :: ~s ~p", - [OsProc#os_proc.port, Id, Reason]), - throw({couch_util:to_existing_atom(Id),Reason}); - _Result -> - {json, Line} - end + throw:abort -> + {json, Line}; + throw:{cmd, _Cmd} -> + case ?JSON_DECODE(Line) of + [<<"log">>, Msg] when is_binary(Msg) -> + % we got a message to log. 
Log it and continue + couch_log:info( + "OS Process ~p Log :: ~s", + [OsProc#os_proc.port, Msg] + ), + readjson(OsProc); + [<<"error">>, Id, Reason] -> + throw({error, {couch_util:to_existing_atom(Id), Reason}}); + [<<"fatal">>, Id, Reason] -> + couch_log:info( + "OS Process ~p Fatal Error :: ~s ~p", + [OsProc#os_proc.port, Id, Reason] + ), + throw({couch_util:to_existing_atom(Id), Reason}); + _Result -> + {json, Line} + end end. pick_command(Line) -> @@ -146,7 +152,6 @@ pick_command1(<<"fatal">> = Cmd) -> pick_command1(_) -> throw(abort). - % gen_server API init([Command, Options, PortOptions]) -> couch_io_logger:start(os:getenv("COUCHDB_IO_LOG_DIR")), @@ -155,34 +160,38 @@ init([Command, Options, PortOptions]) -> V = config:get("query_server_config", "os_process_idle_limit", "300"), IdleLimit = list_to_integer(V) * 1000, BaseProc = #os_proc{ - command=Command, - port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions), - writer=fun ?MODULE:writejson/2, - reader=fun ?MODULE:readjson/1, - idle=IdleLimit + command = Command, + port = open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions), + writer = fun ?MODULE:writejson/2, + reader = fun ?MODULE:readjson/1, + idle = IdleLimit }, KillCmd = iolist_to_binary(readline(BaseProc)), Pid = self(), couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]), spawn(fun() -> - % this ensure the real os process is killed when this process dies. - erlang:monitor(process, Pid), - killer(?b2l(KillCmd)) - end), + % this ensure the real os process is killed when this process dies. + erlang:monitor(process, Pid), + killer(?b2l(KillCmd)) + end), OsProc = - lists:foldl(fun(Opt, Proc) -> - case Opt of - {writer, Writer} when is_function(Writer) -> - Proc#os_proc{writer=Writer}; - {reader, Reader} when is_function(Reader) -> - Proc#os_proc{reader=Reader}; - {timeout, TimeOut} when is_integer(TimeOut) -> - Proc#os_proc{timeout=TimeOut} - end - end, BaseProc, Options), + lists:foldl( + fun(Opt, Proc) -> + case Opt of + {writer, Writer} when is_function(Writer) -> + Proc#os_proc{writer = Writer}; + {reader, Reader} when is_function(Reader) -> + Proc#os_proc{reader = Reader}; + {timeout, TimeOut} when is_integer(TimeOut) -> + Proc#os_proc{timeout = TimeOut} + end + end, + BaseProc, + Options + ), {ok, OsProc, IdleLimit}. -terminate(Reason, #os_proc{port=Port}) -> +terminate(Reason, #os_proc{port = Port}) -> catch port_close(Port), case Reason of normal -> @@ -192,10 +201,10 @@ terminate(Reason, #os_proc{port=Port}) -> end, ok. -handle_call({set_timeout, TimeOut}, _From, #os_proc{idle=Idle}=OsProc) -> - {reply, ok, OsProc#os_proc{timeout=TimeOut}, Idle}; -handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) -> - #os_proc{writer=Writer, reader=Reader} = OsProc, +handle_call({set_timeout, TimeOut}, _From, #os_proc{idle = Idle} = OsProc) -> + {reply, ok, OsProc#os_proc{timeout = TimeOut}, Idle}; +handle_call({prompt, Data}, _From, #os_proc{idle = Idle} = OsProc) -> + #os_proc{writer = Writer, reader = Reader} = OsProc, try Writer(OsProc, Data), {reply, {ok, Reader(OsProc)}, OsProc, Idle} @@ -210,7 +219,7 @@ handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) -> garbage_collect() end. 
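readline/2 above deals with the port's {line, 4096} framing: long lines arrive as a run of noeol fragments closed by one eol fragment, and the loop glues them back into a single binary. The accumulation, reduced to a pure function:

```
-module(line_reassembly).
-export([reassemble/1]).

%% Stitch {noeol, Bin} fragments together until the closing {eol, Bin}.
reassemble(Fragments) ->
    reassemble(Fragments, <<>>).

reassemble([{noeol, Data} | Rest], Acc) ->
    reassemble(Rest, <<Acc/binary, Data/binary>>);
reassemble([{eol, Data} | _], Acc) ->
    <<Acc/binary, Data/binary>>.

%% reassemble([{noeol, <<"{\"ok\"">>}, {eol, <<":true}">>}])
%%   =:= <<"{\"ok\":true}">>
```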
-handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) -> +handle_cast({send, Data}, #os_proc{writer = Writer, idle = Idle} = OsProc) -> try Writer(OsProc, Data), {noreply, OsProc, Idle} @@ -219,31 +228,31 @@ handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) -> couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]), {stop, normal, OsProc} end; -handle_cast(garbage_collect, #os_proc{idle=Idle}=OsProc) -> +handle_cast(garbage_collect, #os_proc{idle = Idle} = OsProc) -> erlang:garbage_collect(), {noreply, OsProc, Idle}; handle_cast(stop, OsProc) -> {stop, normal, OsProc}; -handle_cast(Msg, #os_proc{idle=Idle}=OsProc) -> +handle_cast(Msg, #os_proc{idle = Idle} = OsProc) -> couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]), {noreply, OsProc, Idle}. -handle_info(timeout, #os_proc{idle=Idle}=OsProc) -> +handle_info(timeout, #os_proc{idle = Idle} = OsProc) -> gen_server:cast(couch_proc_manager, {os_proc_idle, self()}), erlang:garbage_collect(), {noreply, OsProc, Idle}; -handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) -> +handle_info({Port, {exit_status, 0}}, #os_proc{port = Port} = OsProc) -> couch_log:info("OS Process terminated normally", []), {stop, normal, OsProc}; -handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) -> +handle_info({Port, {exit_status, Status}}, #os_proc{port = Port} = OsProc) -> couch_log:error("OS Process died with status: ~p", [Status]), {stop, {exit_status, Status}, OsProc}; -handle_info(Msg, #os_proc{idle=Idle}=OsProc) -> +handle_info(Msg, #os_proc{idle = Idle} = OsProc) -> couch_log:debug("OS Proc: Unknown info: ~p", [Msg]), {noreply, OsProc, Idle}. -code_change(_, {os_proc, Cmd, Port, W, R, Timeout} , _) -> - V = config:get("query_server_config","os_process_idle_limit","300"), +code_change(_, {os_proc, Cmd, Port, W, R, Timeout}, _) -> + V = config:get("query_server_config", "os_process_idle_limit", "300"), State = #os_proc{ command = Cmd, port = Port, @@ -257,9 +266,9 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. killer(KillCmd) -> - receive _ -> - os:cmd(KillCmd) + receive + _ -> + os:cmd(KillCmd) after 1000 -> ?MODULE:killer(KillCmd) end. - diff --git a/src/couch/src/couch_partition.erl b/src/couch/src/couch_partition.erl index f2efcaa5e..101b5b324 100644 --- a/src/couch/src/couch_partition.erl +++ b/src/couch/src/couch_partition.erl @@ -12,7 +12,6 @@ -module(couch_partition). - -export([ extract/1, from_docid/1, @@ -29,10 +28,8 @@ hash/1 ]). - -include_lib("couch/include/couch_db.hrl"). - extract(Value) when is_binary(Value) -> case binary:split(Value, <<":">>) of [Partition, Rest] -> @@ -40,11 +37,9 @@ extract(Value) when is_binary(Value) -> _ -> undefined end; - extract(_) -> undefined. - from_docid(DocId) -> case extract(DocId) of undefined -> @@ -53,7 +48,6 @@ from_docid(DocId) -> Partition end. - is_member(DocId, Partition) -> case extract(DocId) of {Partition, _} -> @@ -62,53 +56,52 @@ is_member(DocId, Partition) -> false end. - start_key(Partition) -> <<Partition/binary, ":">>. - end_key(Partition) -> <<Partition/binary, ";">>. - shard_key(Partition) -> <<Partition/binary, ":foo">>. 
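start_key/1 and end_key/1 above lean on byte ordering: ";" (16#3B) is the byte immediately after ":" (16#3A), so <<Partition/binary, ";">> is the smallest binary that sorts after every id of the form <<Partition/binary, ":", DocId/binary>>. That makes the half-open range [start_key(P), end_key(P)) cover a partition exactly:

```
-module(partition_range_sketch).
-export([in_partition_range/2]).

%% True exactly when DocId falls inside partition P's key range.
in_partition_range(P, DocId) ->
    StartKey = <<P/binary, ":">>,  % inclusive lower bound
    EndKey = <<P/binary, ";">>,    % exclusive upper bound
    StartKey =< DocId andalso DocId < EndKey.

%% in_partition_range(<<"sensors">>, <<"sensors:abc">>) -> true
%% in_partition_range(<<"sensors">>, <<"sensorz:abc">>) -> false
```

shard_key/1 exploits the same ordering from the other side: any id with the ":" separator, such as <<Partition/binary, ":foo">>, hashes into the partition's shard range.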
- validate_dbname(DbName, Options) when is_list(DbName) -> validate_dbname(?l2b(DbName), Options); validate_dbname(DbName, Options) when is_binary(DbName) -> Props = couch_util:get_value(props, Options, []), IsPartitioned = couch_util:get_value(partitioned, Props, false), - if not IsPartitioned -> ok; true -> - - DbsDbName = config:get("mem3", "shards_db", "_dbs"), - NodesDbName = config:get("mem3", "nodes_db", "_nodes"), - UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"), - Suffix = couch_db:dbname_suffix(DbName), + if + not IsPartitioned -> + ok; + true -> + DbsDbName = config:get("mem3", "shards_db", "_dbs"), + NodesDbName = config:get("mem3", "nodes_db", "_nodes"), + UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"), + Suffix = couch_db:dbname_suffix(DbName), - SysDbNames = [ + SysDbNames = [ iolist_to_binary(DbsDbName), iolist_to_binary(NodesDbName) | ?SYSTEM_DATABASES ], - Suffices = [ + Suffices = [ <<"_replicator">>, <<"_users">>, iolist_to_binary(UsersDbSuffix) ], - IsSysDb = lists:member(DbName, SysDbNames) - orelse lists:member(Suffix, Suffices), + IsSysDb = + lists:member(DbName, SysDbNames) orelse + lists:member(Suffix, Suffices), - if not IsSysDb -> ok; true -> - throw({bad_request, <<"Cannot partition a system database">>}) - end + if + not IsSysDb -> ok; + true -> throw({bad_request, <<"Cannot partition a system database">>}) + end end. - validate_docid(<<"_design/", _/binary>>) -> ok; validate_docid(<<"_local/", _/binary>>) -> @@ -125,7 +118,6 @@ validate_docid(DocId) when is_binary(DocId) -> couch_doc:validate_docid(PartitionedDocId) end. - validate_partition(<<>>) -> throw({illegal_partition, <<"Partition must not be empty">>}); validate_partition(Partition) when is_binary(Partition) -> @@ -153,7 +145,6 @@ validate_partition(Partition) when is_binary(Partition) -> validate_partition(_) -> throw({illegal_partition, <<"Partition must be a string">>}). - % Document ids that start with an underscore % (i.e., _local and _design) do not contain a % partition and thus do not use the partition diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl index 55ffb359f..828d2f68b 100644 --- a/src/couch/src/couch_passwords.erl +++ b/src/couch/src/couch_passwords.erl @@ -40,98 +40,144 @@ hash_admin_password(ClearPassword) when is_binary(ClearPassword) -> Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"), hash_admin_password(Scheme, ClearPassword). -hash_admin_password("simple", ClearPassword) -> % deprecated +% deprecated +hash_admin_password("simple", ClearPassword) -> Salt = couch_uuids:random(), Hash = crypto:hash(sha, <<ClearPassword/binary, Salt/binary>>), ?l2b("-hashed-" ++ couch_util:to_hex(Hash) ++ "," ++ ?b2l(Salt)); hash_admin_password("pbkdf2", ClearPassword) -> Iterations = chttpd_util:get_chttpd_auth_config("iterations", "10"), Salt = couch_uuids:random(), - DerivedKey = couch_passwords:pbkdf2(couch_util:to_binary(ClearPassword), - Salt, list_to_integer(Iterations)), - ?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ "," - ++ ?b2l(Salt) ++ "," - ++ Iterations). + DerivedKey = couch_passwords:pbkdf2( + couch_util:to_binary(ClearPassword), + Salt, + list_to_integer(Iterations) + ), + ?l2b( + "-pbkdf2-" ++ ?b2l(DerivedKey) ++ "," ++ + ?b2l(Salt) ++ "," ++ + Iterations + ). -spec get_unhashed_admins() -> list(). 
get_unhashed_admins() -> lists:filter( - fun({_User, "-hashed-" ++ _}) -> - false; % already hashed - ({_User, "-pbkdf2-" ++ _}) -> - false; % already hashed - ({_User, _ClearPassword}) -> - true + fun + ({_User, "-hashed-" ++ _}) -> + % already hashed + false; + ({_User, "-pbkdf2-" ++ _}) -> + % already hashed + false; + ({_User, _ClearPassword}) -> + true end, - config:get("admins")). + config:get("admins") + ). %% Current scheme, much stronger. -spec pbkdf2(binary(), binary(), integer()) -> binary(). -pbkdf2(Password, Salt, Iterations) when is_binary(Password), - is_binary(Salt), - is_integer(Iterations), - Iterations > 0 -> +pbkdf2(Password, Salt, Iterations) when + is_binary(Password), + is_binary(Salt), + is_integer(Iterations), + Iterations > 0 +-> {ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH), Result; -pbkdf2(Password, Salt, Iterations) when is_binary(Salt), - is_integer(Iterations), - Iterations > 0 -> +pbkdf2(Password, Salt, Iterations) when + is_binary(Salt), + is_integer(Iterations), + Iterations > 0 +-> Msg = io_lib:format("Password value of '~p' is invalid.", [Password]), throw({forbidden, Msg}); -pbkdf2(Password, Salt, Iterations) when is_binary(Password), - is_integer(Iterations), - Iterations > 0 -> +pbkdf2(Password, Salt, Iterations) when + is_binary(Password), + is_integer(Iterations), + Iterations > 0 +-> Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]), throw({forbidden, Msg}). --spec pbkdf2(binary(), binary(), integer(), integer()) - -> {ok, binary()} | {error, derived_key_too_long}. -pbkdf2(_Password, _Salt, _Iterations, DerivedLength) - when DerivedLength > ?MAX_DERIVED_KEY_LENGTH -> +-spec pbkdf2(binary(), binary(), integer(), integer()) -> + {ok, binary()} | {error, derived_key_too_long}. +pbkdf2(_Password, _Salt, _Iterations, DerivedLength) when + DerivedLength > ?MAX_DERIVED_KEY_LENGTH +-> {error, derived_key_too_long}; -pbkdf2(Password, Salt, Iterations, DerivedLength) when is_binary(Password), - is_binary(Salt), - is_integer(Iterations), - Iterations > 0, - is_integer(DerivedLength) -> +pbkdf2(Password, Salt, Iterations, DerivedLength) when + is_binary(Password), + is_binary(Salt), + is_integer(Iterations), + Iterations > 0, + is_integer(DerivedLength) +-> L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH), - <<Bin:DerivedLength/binary,_/binary>> = + <<Bin:DerivedLength/binary, _/binary>> = iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])), {ok, ?l2b(couch_util:to_hex(Bin))}. --spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist()) - -> iolist(). -pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc) - when BlockIndex > BlockCount -> +-spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist()) -> + iolist(). +pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc) when + BlockIndex > BlockCount +-> lists:reverse(Acc); pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) -> Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>), - pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block|Acc]). + pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block | Acc]). --spec pbkdf2(binary(), binary(), integer(), integer(), integer(), - binary(), binary()) -> binary(). -pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc) - when Iteration > Iterations -> +-spec pbkdf2( + binary(), + binary(), + integer(), + integer(), + integer(), + binary(), + binary() +) -> binary(). 
+pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc) when + Iteration > Iterations +-> Acc; pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) -> - InitialBlock = couch_util:hmac(sha, Password, - <<Salt/binary,BlockIndex:32/integer>>), - pbkdf2(Password, Salt, Iterations, BlockIndex, 2, - InitialBlock, InitialBlock); + InitialBlock = couch_util:hmac( + sha, + Password, + <<Salt/binary, BlockIndex:32/integer>> + ), + pbkdf2( + Password, + Salt, + Iterations, + BlockIndex, + 2, + InitialBlock, + InitialBlock + ); pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) -> Next = couch_util:hmac(sha, Password, Prev), - pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1, - Next, crypto:exor(Next, Acc)). + pbkdf2( + Password, + Salt, + Iterations, + BlockIndex, + Iteration + 1, + Next, + crypto:exor(Next, Acc) + ). %% verify two lists for equality without short-circuits to avoid timing attacks. -spec verify(string(), string(), integer()) -> boolean(). -verify([X|RestX], [Y|RestY], Result) -> +verify([X | RestX], [Y | RestY], Result) -> verify(RestX, RestY, (X bxor Y) bor Result); verify([], [], Result) -> Result == 0. --spec verify(binary(), binary()) -> boolean(); - (list(), list()) -> boolean(). +-spec verify + (binary(), binary()) -> boolean(); + (list(), list()) -> boolean(). verify(<<X/binary>>, <<Y/binary>>) -> verify(?b2l(X), ?b2l(Y)); verify(X, Y) when is_list(X) and is_list(Y) -> @@ -141,7 +187,8 @@ verify(X, Y) when is_list(X) and is_list(Y) -> false -> false end; -verify(_X, _Y) -> false. +verify(_X, _Y) -> + false. -spec ceiling(number()) -> integer(). ceiling(X) -> diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl index 73c3de710..4f2917f98 100644 --- a/src/couch/src/couch_primary_sup.erl +++ b/src/couch/src/couch_primary_sup.erl @@ -15,30 +15,20 @@ -export([init/1, start_link/0]). start_link() -> - supervisor:start_link({local,couch_primary_services}, ?MODULE, []). + supervisor:start_link({local, couch_primary_services}, ?MODULE, []). init([]) -> - Children = [ - {couch_task_status, - {couch_task_status, start_link, []}, - permanent, - brutal_kill, - worker, - [couch_task_status]} - ] ++ couch_servers(), + Children = + [ + {couch_task_status, {couch_task_status, start_link, []}, permanent, brutal_kill, worker, + [couch_task_status]} + ] ++ couch_servers(), {ok, {{one_for_one, 10, 3600}, Children}}. - couch_servers() -> N = couch_server:num_servers(), [couch_server(I) || I <- lists:seq(1, N)]. couch_server(N) -> Name = couch_server:couch_server(N), - {Name, - {couch_server, sup_start_link, [N]}, - permanent, - brutal_kill, - worker, - [couch_server] - }. + {Name, {couch_server, sup_start_link, [N]}, permanent, brutal_kill, worker, [couch_server]}. diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl index e7a25a6d2..6d86c16a7 100644 --- a/src/couch/src/couch_proc_manager.erl +++ b/src/couch/src/couch_proc_manager.erl @@ -60,7 +60,7 @@ -record(client, { timestamp :: os:timestamp() | '_', - from :: undefined | {pid(), reference()} | '_', + from :: undefined | {pid(), reference()} | '_', lang :: binary() | '_', ddoc :: #doc{} | '_', ddoc_key :: undefined | {DDocId :: docid(), Rev :: revision()} | '_' @@ -77,27 +77,21 @@ t0 = os:timestamp() }). - start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). - get_proc_count() -> gen_server:call(?MODULE, get_proc_count). - get_stale_proc_count() -> gen_server:call(?MODULE, get_stale_proc_count). 
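Back in couch_passwords, verify/3 compares hash strings without short-circuiting: each byte pair's XOR is OR-ed into an accumulator and only the final accumulator is tested, so the running time does not leak the position of the first mismatch. The same fold written with lists:foldl, for illustration:

```
-module(const_time_sketch).
-export([const_time_eq/2]).

%% Visits every byte pair regardless of mismatches; the accumulated OR is
%% zero only if every XOR was zero.
const_time_eq(X, Y) when length(X) =:= length(Y) ->
    Fold = fun({A, B}, Acc) -> (A bxor B) bor Acc end,
    0 =:= lists:foldl(Fold, 0, lists:zip(X, Y));
const_time_eq(_, _) ->
    false.

%% const_time_eq("secret", "secret") -> true
%% const_time_eq("secret", "sicret") -> false, in the same time
```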
- reload() -> gen_server:call(?MODULE, set_threshold_ts). - terminate_stale_procs() -> gen_server:call(?MODULE, terminate_stale_procs). - init([]) -> process_flag(trap_exit, true), ok = config:listen_for_changes(?MODULE, undefined), @@ -120,50 +114,48 @@ init([]) -> soft_limit = get_soft_limit() }}. - terminate(_Reason, _State) -> - ets:foldl(fun(#proc_int{pid=P}, _) -> - couch_util:shutdown_sync(P) - end, 0, ?PROCS), + ets:foldl( + fun(#proc_int{pid = P}, _) -> + couch_util:shutdown_sync(P) + end, + 0, + ?PROCS + ), ok. - handle_call(get_proc_count, _From, State) -> NumProcs = ets:info(?PROCS, size), NumOpening = ets:info(?OPENING, size), {reply, NumProcs + NumOpening, State}; - handle_call(get_stale_proc_count, _From, State) -> #state{threshold_ts = T0} = State, - MatchSpec = [{#proc_int{t0='$1', _='_'}, [{'<', '$1', {T0}}], [true]}], + MatchSpec = [{#proc_int{t0 = '$1', _ = '_'}, [{'<', '$1', {T0}}], [true]}], {reply, ets:select_count(?PROCS, MatchSpec), State}; - -handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, State) -> +handle_call({get_proc, #doc{body = {Props}} = DDoc, DDocKey}, From, State) -> LangStr = couch_util:get_value(<<"language">>, Props, <<"javascript">>), Lang = couch_util:to_binary(LangStr), - Client = #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey}, + Client = #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey}, add_waiting_client(Client), {noreply, flush_waiters(State, Lang)}; - handle_call({get_proc, LangStr}, From, State) -> Lang = couch_util:to_binary(LangStr), - Client = #client{from=From, lang=Lang}, + Client = #client{from = From, lang = Lang}, add_waiting_client(Client), {noreply, flush_waiters(State, Lang)}; - -handle_call({ret_proc, #proc{client=Ref} = Proc}, _From, State) -> +handle_call({ret_proc, #proc{client = Ref} = Proc}, _From, State) -> erlang:demonitor(Ref, [flush]), - NewState = case ets:lookup(?PROCS, Proc#proc.pid) of - [#proc_int{}=ProcInt] -> - return_proc(State, ProcInt); - [] -> - % Proc must've died and we already - % cleared it out of the table in - % the handle_info clause. - State - end, + NewState = + case ets:lookup(?PROCS, Proc#proc.pid) of + [#proc_int{} = ProcInt] -> + return_proc(State, ProcInt); + [] -> + % Proc must've died and we already + % cleared it out of the table in + % the handle_info clause. + State + end, {reply, true, NewState}; - handle_call(set_threshold_ts, _From, State) -> FoldFun = fun (#proc_int{client = undefined} = Proc, StateAcc) -> @@ -173,7 +165,6 @@ handle_call(set_threshold_ts, _From, State) -> end, NewState = ets:foldl(FoldFun, State, ?PROCS), {reply, ok, NewState#state{threshold_ts = os:timestamp()}}; - handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) -> FoldFun = fun (#proc_int{client = undefined, t0 = Ts2} = Proc, StateAcc) -> @@ -188,26 +179,24 @@ handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) -> end, NewState = ets:foldl(FoldFun, State, ?PROCS), {reply, ok, NewState}; - handle_call(_Call, _From, State) -> {reply, ignored, State}. 
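The get_proc/ret_proc pair above is a classic resource checkout: the manager monitors the borrowing client (assign_proc stores the monitor ref in the proc's client field), demonitors on return, and, in the 'DOWN' clause further down, reclaims procs whose borrowers died. The pattern in miniature, with a map as the pool of held resources:

```
-module(checkout_sketch).
-export([checkout/3, checkin/2, handle_down/2]).

%% Checkout: monitor the borrower and remember what it holds.
checkout(ClientPid, Resource, Held) ->
    Ref = erlang:monitor(process, ClientPid),
    {Ref, Held#{Ref => Resource}}.

%% Checkin: the borrower returned the resource; drop the monitor.
checkin(Ref, Held) ->
    erlang:demonitor(Ref, [flush]),
    maps:remove(Ref, Held).

%% A 'DOWN' message means a borrower died while holding a resource.
handle_down(Ref, Held) ->
    case maps:take(Ref, Held) of
        {Resource, Held2} -> {reclaim, Resource, Held2};
        error -> {ignore, Held}
    end.
```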
- -handle_cast({os_proc_idle, Pid}, #state{counts=Counts}=State) -> - NewState = case ets:lookup(?PROCS, Pid) of - [#proc_int{client=undefined, lang=Lang}=Proc] -> - case dict:find(Lang, Counts) of - {ok, Count} when Count >= State#state.soft_limit -> - couch_log:info("Closing idle OS Process: ~p", [Pid]), - remove_proc(State, Proc); - {ok, _} -> - State - end; - _ -> - State - end, +handle_cast({os_proc_idle, Pid}, #state{counts = Counts} = State) -> + NewState = + case ets:lookup(?PROCS, Pid) of + [#proc_int{client = undefined, lang = Lang} = Proc] -> + case dict:find(Lang, Counts) of + {ok, Count} when Count >= State#state.soft_limit -> + couch_log:info("Closing idle OS Process: ~p", [Pid]), + remove_proc(State, Proc); + {ok, _} -> + State + end; + _ -> + State + end, {noreply, NewState}; - handle_cast(reload_config, State) -> NewState = State#state{ config = get_proc_config(), @@ -216,29 +205,24 @@ handle_cast(reload_config, State) -> }, maybe_configure_erlang_native_servers(), {noreply, flush_waiters(NewState)}; - handle_cast(_Msg, State) -> {noreply, State}. - handle_info(shutdown, State) -> {stop, shutdown, State}; - -handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid,_} = From}}, State) -> +handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid, _} = From}}, State) -> ets:delete(?OPENING, Pid), link(Proc0#proc_int.pid), Proc = assign_proc(ClientPid, Proc0), gen_server:reply(From, {ok, Proc, State#state.config}), {noreply, State}; - handle_info({'EXIT', Pid, spawn_error}, State) -> - [{Pid, #client{lang=Lang}}] = ets:lookup(?OPENING, Pid), + [{Pid, #client{lang = Lang}}] = ets:lookup(?OPENING, Pid), ets:delete(?OPENING, Pid), NewState = State#state{ counts = dict:update_counter(Lang, -1, State#state.counts) }, {noreply, flush_waiters(NewState, Lang)}; - handle_info({'EXIT', Pid, Reason}, State) -> couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]), case ets:lookup(?PROCS, Pid) of @@ -248,25 +232,20 @@ handle_info({'EXIT', Pid, Reason}, State) -> [] -> {noreply, State} end; - handle_info({'DOWN', Ref, _, _, _Reason}, State0) -> - case ets:match_object(?PROCS, #proc_int{client=Ref, _='_'}) of + case ets:match_object(?PROCS, #proc_int{client = Ref, _ = '_'}) of [#proc_int{} = Proc] -> {noreply, return_proc(State0, Proc)}; [] -> {noreply, State0} end; - - handle_info(restart_config_listener, State) -> ok = config:listen_for_changes(?MODULE, nil), {noreply, State}; - handle_info(_Msg, State) -> {noreply, State}. - -code_change(_OldVsn, #state{}=State, _Extra) -> +code_change(_OldVsn, #state{} = State, _Extra) -> {ok, State}. handle_config_terminate(_, stop, _) -> @@ -284,7 +263,6 @@ handle_config_change("query_server_config", _, _, _, _) -> handle_config_change(_, _, _, _, _) -> {ok, undefined}. - find_proc(#client{lang = Lang, ddoc_key = undefined}) -> Pred = fun(_) -> true @@ -296,7 +274,7 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) -> end, case find_proc(Lang, Pred) of not_found -> - case find_proc(Client#client{ddoc_key=undefined}) of + case find_proc(Client#client{ddoc_key = undefined}) of {ok, Proc} -> teach_ddoc(DDoc, DDocKey, Proc); Else -> @@ -313,9 +291,8 @@ find_proc(Lang, Fun) -> {error, Reason} end. 
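The os_proc_idle clause above implements the soft limit: an idle OS process is closed only while its language's live count is at or above os_process_soft_limit; below that, idle processes stay warm for reuse. The rule on its own:

```
-module(idle_policy_sketch).
-export([on_idle/2]).

%% Decide the fate of a process that just reported itself idle.
on_idle(Count, SoftLimit) when Count >= SoftLimit -> close;
on_idle(_Count, _SoftLimit) -> keep.

%% With the default soft limit of 100 (see get_soft_limit/0 below):
%% on_idle(100, 100) -> close, on_idle(40, 100) -> keep.
```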
- iter_procs(Lang, Fun) when is_binary(Lang) -> - Pattern = #proc_int{lang=Lang, client=undefined, _='_'}, + Pattern = #proc_int{lang = Lang, client = undefined, _ = '_'}, MSpec = [{Pattern, [], ['$_']}], case ets:select_reverse(?PROCS, MSpec, 25) of '$end_of_table' -> @@ -324,7 +301,6 @@ iter_procs(Lang, Fun) when is_binary(Lang) -> iter_procs_int(Continuation, Fun) end. - iter_procs_int({[], Continuation0}, Fun) -> case ets:select_reverse(Continuation0) of '$end_of_table' -> @@ -340,7 +316,6 @@ iter_procs_int({[Proc | Rest], Continuation}, Fun) -> iter_procs_int({Rest, Continuation}, Fun) end. - spawn_proc(State, Client) -> Pid = spawn_link(?MODULE, new_proc, [Client]), ets:insert(?OPENING, {Pid, Client}), @@ -350,36 +325,38 @@ spawn_proc(State, Client) -> counts = dict:update_counter(Lang, 1, Counts) }. - -new_proc(#client{ddoc=undefined, ddoc_key=undefined}=Client) -> - #client{from=From, lang=Lang} = Client, - Resp = try - case new_proc_int(From, Lang) of - {ok, Proc} -> - {spawn_ok, Proc, From}; - Error -> - gen_server:reply(From, {error, Error}), +new_proc(#client{ddoc = undefined, ddoc_key = undefined} = Client) -> + #client{from = From, lang = Lang} = Client, + Resp = + try + case new_proc_int(From, Lang) of + {ok, Proc} -> + {spawn_ok, Proc, From}; + Error -> + gen_server:reply(From, {error, Error}), + spawn_error + end + catch + _:_ -> spawn_error - end - catch _:_ -> - spawn_error - end, + end, exit(Resp); - new_proc(Client) -> - #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey} = Client, - Resp = try - case new_proc_int(From, Lang) of - {ok, NewProc} -> - {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc), - {spawn_ok, Proc, From}; - Error -> - gen_server:reply(From, {error, Error}), - spawn_error - end - catch _:_ -> - spawn_error - end, + #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client, + Resp = + try + case new_proc_int(From, Lang) of + {ok, NewProc} -> + {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc), + {spawn_ok, Proc, From}; + Error -> + gen_server:reply(From, {error, Error}), + spawn_error + end + catch + _:_ -> + spawn_error + end, exit(Resp). split_string_if_longer(String, Pos) -> @@ -399,14 +376,17 @@ split_by_char(String, Char) -> get_servers_from_env(Spec) -> SpecLen = length(Spec), % loop over os:getenv(), match SPEC_ - lists:filtermap(fun(EnvStr) -> - case split_string_if_longer(EnvStr, SpecLen) of - {Spec, Rest} -> - {true, split_by_char(Rest, $=)}; - _ -> - false - end - end, os:getenv()). + lists:filtermap( + fun(EnvStr) -> + case split_string_if_longer(EnvStr, SpecLen) of + {Spec, Rest} -> + {true, split_by_char(Rest, $=)}; + _ -> + false + end + end, + os:getenv() + ). get_query_server(LangStr) -> case ets:lookup(?SERVERS, string:to_upper(LangStr)) of @@ -425,39 +405,39 @@ native_query_server_enabled() -> maybe_configure_erlang_native_servers() -> case native_query_server_enabled() of true -> - ets:insert(?SERVERS, [ - {"ERLANG", {couch_native_process, start_link, []}}]); + ets:insert(?SERVERS, [ + {"ERLANG", {couch_native_process, start_link, []}} + ]); _Else -> - ok + ok end. 
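get_servers_from_env/1 above scans the whole OS environment for variables whose names start with a caller-supplied prefix and splits the remainder at the first "=". The caller is not in this hunk, so assuming a prefix such as "COUCHDB_QUERY_SERVER_", an entry like COUCHDB_QUERY_SERVER_JAVASCRIPT=/usr/bin/couchjs would yield {"JAVASCRIPT", "/usr/bin/couchjs"}. One entry's worth of that parse, using string:prefix/2 in place of the hand-rolled split:

```
-module(env_server_sketch).
-export([parse_env_entry/2]).

%% {ok, {Name, Value}} for a matching NAME=VALUE entry, skip otherwise.
parse_env_entry(Prefix, EnvStr) ->
    case string:prefix(EnvStr, Prefix) of
        nomatch ->
            skip;
        Rest ->
            %% Assumes a well-formed entry with at least one "=".
            [Name, Value] = string:split(Rest, "="),
            {ok, {Name, Value}}
    end.

%% parse_env_entry("SPEC_", "SPEC_JS=/usr/bin/js")
%%   -> {ok, {"JS", "/usr/bin/js"}}
```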
new_proc_int(From, Lang) when is_binary(Lang) -> LangStr = binary_to_list(Lang), case get_query_server(LangStr) of - undefined -> - gen_server:reply(From, {unknown_query_language, Lang}); - {M, F, A} -> - {ok, Pid} = apply(M, F, A), - make_proc(Pid, Lang, M); - Command -> - {ok, Pid} = couch_os_process:start_link(Command), - make_proc(Pid, Lang, couch_os_process) + undefined -> + gen_server:reply(From, {unknown_query_language, Lang}); + {M, F, A} -> + {ok, Pid} = apply(M, F, A), + make_proc(Pid, Lang, M); + Command -> + {ok, Pid} = couch_os_process:start_link(Command), + make_proc(Pid, Lang, couch_os_process) end. - -teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc_int{ddoc_keys=Keys}=Proc) -> +teach_ddoc(DDoc, {DDocId, _Rev} = DDocKey, #proc_int{ddoc_keys = Keys} = Proc) -> % send ddoc over the wire % we only share the rev with the client we know to update code % but it only keeps the latest copy, per each ddoc, around. true = couch_query_servers:proc_prompt( export_proc(Proc), - [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]), + [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])] + ), % we should remove any other ddocs keys for this docid % because the query server overwrites without the rev - Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId], + Keys2 = [{D, R} || {D, R} <- Keys, D /= DDocId], % add ddoc to the proc - {ok, Proc#proc_int{ddoc_keys=[DDocKey|Keys2]}}. - + {ok, Proc#proc_int{ddoc_keys = [DDocKey | Keys2]}}. make_proc(Pid, Lang, Mod) when is_binary(Lang) -> Proc = #proc_int{ @@ -470,42 +450,42 @@ make_proc(Pid, Lang, Mod) when is_binary(Lang) -> unlink(Pid), {ok, Proc}. - -assign_proc(Pid, #proc_int{client=undefined}=Proc0) when is_pid(Pid) -> +assign_proc(Pid, #proc_int{client = undefined} = Proc0) when is_pid(Pid) -> Proc = Proc0#proc_int{client = erlang:monitor(process, Pid)}, ets:insert(?PROCS, Proc), export_proc(Proc); -assign_proc(#client{}=Client, #proc_int{client=undefined}=Proc) -> +assign_proc(#client{} = Client, #proc_int{client = undefined} = Proc) -> {Pid, _} = Client#client.from, assign_proc(Pid, Proc). - return_proc(#state{} = State, #proc_int{} = ProcInt) -> #proc_int{pid = Pid, lang = Lang} = ProcInt, - NewState = case is_process_alive(Pid) of true -> - case ProcInt#proc_int.t0 < State#state.threshold_ts of + NewState = + case is_process_alive(Pid) of true -> - remove_proc(State, ProcInt); + case ProcInt#proc_int.t0 < State#state.threshold_ts of + true -> + remove_proc(State, ProcInt); + false -> + gen_server:cast(Pid, garbage_collect), + true = ets:update_element(?PROCS, Pid, [ + {#proc_int.client, undefined} + ]), + State + end; false -> - gen_server:cast(Pid, garbage_collect), - true = ets:update_element(?PROCS, Pid, [ - {#proc_int.client, undefined} - ]), - State - end; - false -> - remove_proc(State, ProcInt) - end, + remove_proc(State, ProcInt) + end, flush_waiters(NewState, Lang). - -remove_proc(State, #proc_int{}=Proc) -> +remove_proc(State, #proc_int{} = Proc) -> ets:delete(?PROCS, Proc#proc_int.pid), - case is_process_alive(Proc#proc_int.pid) of true -> - unlink(Proc#proc_int.pid), - gen_server:cast(Proc#proc_int.pid, stop); - false -> - ok + case is_process_alive(Proc#proc_int.pid) of + true -> + unlink(Proc#proc_int.pid), + gen_server:cast(Proc#proc_int.pid, stop); + false -> + ok end, Counts = State#state.counts, Lang = Proc#proc_int.lang, @@ -513,7 +493,6 @@ remove_proc(State, #proc_int{}=Proc) -> counts = dict:update_counter(Lang, -1, Counts) }. - -spec export_proc(#proc_int{}) -> #proc{}. 
export_proc(#proc_int{} = ProcInt) -> ProcIntList = tuple_to_list(ProcInt), @@ -521,17 +500,19 @@ export_proc(#proc_int{} = ProcInt) -> [_ | Data] = lists:sublist(ProcIntList, ProcLen), list_to_tuple([proc | Data]). - flush_waiters(State) -> - dict:fold(fun(Lang, Count, StateAcc) -> - case Count < State#state.hard_limit of - true -> - flush_waiters(StateAcc, Lang); - false -> - StateAcc - end - end, State, State#state.counts). - + dict:fold( + fun(Lang, Count, StateAcc) -> + case Count < State#state.hard_limit of + true -> + flush_waiters(StateAcc, Lang); + false -> + StateAcc + end + end, + State, + State#state.counts + ). flush_waiters(State, Lang) -> CanSpawn = can_spawn(State, Lang), @@ -558,31 +539,27 @@ flush_waiters(State, Lang) -> State end. - add_waiting_client(Client) -> - ets:insert(?WAITERS, Client#client{timestamp=os:timestamp()}). + ets:insert(?WAITERS, Client#client{timestamp = os:timestamp()}). -spec get_waiting_client(Lang :: binary()) -> undefined | #client{}. get_waiting_client(Lang) -> - case ets:match_object(?WAITERS, #client{lang=Lang, _='_'}, 1) of + case ets:match_object(?WAITERS, #client{lang = Lang, _ = '_'}, 1) of '$end_of_table' -> undefined; - {[#client{}=Client], _} -> + {[#client{} = Client], _} -> Client end. - remove_waiting_client(#client{timestamp = Timestamp}) -> ets:delete(?WAITERS, Timestamp). - can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) -> case dict:find(Lang, Counts) of {ok, Count} -> Count < HardLimit; error -> true end. - get_proc_config() -> Limit = config:get_boolean("query_server_config", "reduce_limit", true), Timeout = config:get_integer("couchdb", "os_process_timeout", 5000), @@ -591,11 +568,9 @@ get_proc_config() -> {<<"timeout">>, Timeout} ]}. - get_hard_limit() -> LimStr = config:get("query_server_config", "os_process_limit", "100"), list_to_integer(LimStr). - get_soft_limit() -> config:get_integer("query_server_config", "os_process_soft_limit", 100). diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl index 10b8048dd..5dd7c4a4b 100644 --- a/src/couch/src/couch_query_servers.erl +++ b/src/couch/src/couch_query_servers.erl @@ -14,7 +14,7 @@ -export([try_compile/4]). -export([start_doc_map/3, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]). --export([reduce/3, rereduce/3,validate_doc_update/5]). +-export([reduce/3, rereduce/3, validate_doc_update/5]). -export([filter_docs/5]). -export([filter_view/3]). -export([finalize/2]). @@ -27,14 +27,17 @@ -include_lib("couch/include/couch_db.hrl"). --define(SUMERROR, <<"The _sum function requires that map values be numbers, " +-define(SUMERROR, << + "The _sum function requires that map values be numbers, " "arrays of numbers, or objects. Objects cannot be mixed with other " "data structures. Objects can be arbitrarily nested, provided that the values " - "for all fields are themselves numbers, arrays of numbers, or objects.">>). - --define(STATERROR, <<"The _stats function requires that map values be numbers " - "or arrays of numbers, not '~p'">>). + "for all fields are themselves numbers, arrays of numbers, or objects." +>>). +-define(STATERROR, << + "The _stats function requires that map values be numbers " + "or arrays of numbers, not '~p'" +>>). 
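The ?SUMERROR text above doubles as the specification for the _sum reducer: values may be numbers, arrays of numbers (summed element-wise), or objects whose fields recursively hold the same shapes, but objects cannot be mixed with non-objects in one reduction. As ejson terms, for instance:

```
-module(sum_shapes_sketch).
-export([valid_sum_values/0]).

%% Shapes _sum accepts, per the error text above:
valid_sum_values() ->
    [
        1,
        4.5,
        [1, 2, 3],                            % array: summed element-wise
        {[{<<"a">>, 1}, {<<"b">>, [2, 3]}]},  % object: fields summed by key
        {[{<<"a">>, {[{<<"b">>, 2}]}}]}       % objects may nest arbitrarily
    ].

%% Mixing shapes, e.g. reducing 1 together with {[{<<"a">>, 1}]}, is
%% rejected with a builtin_reduce_error carrying the message above.
```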
try_compile(Proc, FunctionType, FunctionName, FunctionSource) -> try @@ -54,20 +57,21 @@ try_compile(Proc, FunctionType, FunctionName, FunctionSource) -> start_doc_map(Lang, Functions, Lib) -> Proc = get_os_process(Lang), case Lib of - {[]} -> ok; - Lib -> - true = proc_prompt(Proc, [<<"add_lib">>, Lib]) + {[]} -> ok; + Lib -> true = proc_prompt(Proc, [<<"add_lib">>, Lib]) end, - lists:foreach(fun(FunctionSource) -> - true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource]) - end, Functions), + lists:foreach( + fun(FunctionSource) -> + true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource]) + end, + Functions + ), {ok, Proc}. map_doc_raw(Proc, Doc) -> Json = couch_doc:to_json_obj(Doc, []), {ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}. - stop_doc_map(nil) -> ok; stop_doc_map(Proc) -> @@ -77,20 +81,24 @@ group_reductions_results([]) -> []; group_reductions_results(List) -> {Heads, Tails} = lists:foldl( - fun([H|T], {HAcc,TAcc}) -> - {[H|HAcc], [T|TAcc]} - end, {[], []}, List), + fun([H | T], {HAcc, TAcc}) -> + {[H | HAcc], [T | TAcc]} + end, + {[], []}, + List + ), case Tails of - [[]|_] -> % no tails left - [Heads]; - _ -> - [Heads | group_reductions_results(Tails)] + % no tails left + [[] | _] -> + [Heads]; + _ -> + [Heads | group_reductions_results(Tails)] end. -finalize(<<"_approx_count_distinct",_/binary>>, Reduction) -> +finalize(<<"_approx_count_distinct", _/binary>>, Reduction) -> true = hyper:is_hyper(Reduction), {ok, round(hyper:card(Reduction))}; -finalize(<<"_stats",_/binary>>, Unpacked) -> +finalize(<<"_stats", _/binary>>, Unpacked) -> {ok, pack_stats(Unpacked)}; finalize(_RedSrc, Reduction) -> {ok, Reduction}. @@ -101,45 +109,51 @@ rereduce(Lang, RedSrcs, ReducedValues) -> Grouped = group_reductions_results(ReducedValues), Results = lists:zipwith( fun - (<<"_", _/binary>> = FunSrc, Values) -> - {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []), - Result; - (FunSrc, Values) -> - os_rereduce(Lang, [FunSrc], Values) - end, RedSrcs, Grouped), + (<<"_", _/binary>> = FunSrc, Values) -> + {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []), + Result; + (FunSrc, Values) -> + os_rereduce(Lang, [FunSrc], Values) + end, + RedSrcs, + Grouped + ), {ok, Results}. reduce(_Lang, [], _KVs) -> {ok, []}; reduce(Lang, RedSrcs, KVs) -> - {OsRedSrcs, BuiltinReds} = lists:partition(fun - (<<"_", _/binary>>) -> false; - (_OsFun) -> true - end, RedSrcs), + {OsRedSrcs, BuiltinReds} = lists:partition( + fun + (<<"_", _/binary>>) -> false; + (_OsFun) -> true + end, + RedSrcs + ), {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs), {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []), recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []). - recombine_reduce_results([], [], [], Acc) -> {ok, lists:reverse(Acc)}; -recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) -> - recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]); -recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) -> - recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]). +recombine_reduce_results([<<"_", _/binary>> | RedSrcs], OsResults, [BRes | BuiltinResults], Acc) -> + recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes | Acc]); +recombine_reduce_results([_OsFun | RedSrcs], [OsR | OsResults], BuiltinResults, Acc) -> + recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR | Acc]). 
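The dispatch rule in reduce/3 above is worth spelling out: reduce sources beginning with `_` are built-ins handled natively, everything else is shipped to the language's OS process, and recombine_reduce_results/4 then re-interleaves the two result lists back into the original source order. A minimal sketch of the partition step:

```erlang
%% Sketch of the predicate used by reduce/3 above.
{OsSrcs, Builtins} =
    lists:partition(
        fun
            (<<"_", _/binary>>) -> false;
            (_OsFun) -> true
        end,
        [<<"_sum">>, <<"function(ks, vs) { return sum(vs); }">>, <<"_stats">>]
    ).
%% OsSrcs   =:= [<<"function(ks, vs) { return sum(vs); }">>]
%% Builtins =:= [<<"_sum">>, <<"_stats">>]
```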
os_reduce(_Lang, [], _KVs) -> {ok, []}; os_reduce(Lang, OsRedSrcs, KVs) -> Proc = get_os_process(Lang), - OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of - [true, Reductions] -> Reductions - catch - throw:{reduce_overflow_error, Msg} -> - [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs] - after - ok = ret_os_process(Proc) - end, + OsResults = + try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of + [true, Reductions] -> Reductions + catch + throw:{reduce_overflow_error, Msg} -> + [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs] + after + ok = ret_os_process(Proc) + end, {ok, OsResults}. os_rereduce(Lang, OsRedSrcs, KVs) -> @@ -158,7 +172,6 @@ os_rereduce(Lang, OsRedSrcs, KVs) -> Error end. - get_overflow_error([]) -> undefined; get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) -> @@ -166,26 +179,24 @@ get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) -> get_overflow_error([_ | Rest]) -> get_overflow_error(Rest). - builtin_reduce(_Re, [], _KVs, Acc) -> {ok, lists:reverse(Acc)}; -builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) -> +builtin_reduce(Re, [<<"_sum", _/binary>> | BuiltinReds], KVs, Acc) -> Sum = builtin_sum_rows(KVs, 0), Red = check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum), - builtin_reduce(Re, BuiltinReds, KVs, [Red|Acc]); -builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) -> + builtin_reduce(Re, BuiltinReds, KVs, [Red | Acc]); +builtin_reduce(reduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) -> Count = length(KVs), - builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]); -builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) -> + builtin_reduce(reduce, BuiltinReds, KVs, [Count | Acc]); +builtin_reduce(rereduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) -> Count = builtin_sum_rows(KVs, 0), - builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]); -builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) -> + builtin_reduce(rereduce, BuiltinReds, KVs, [Count | Acc]); +builtin_reduce(Re, [<<"_stats", _/binary>> | BuiltinReds], KVs, Acc) -> Stats = builtin_stats(Re, KVs), - builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]); -builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) -> + builtin_reduce(Re, BuiltinReds, KVs, [Stats | Acc]); +builtin_reduce(Re, [<<"_approx_count_distinct", _/binary>> | BuiltinReds], KVs, Acc) -> Distinct = approx_count_distinct(Re, KVs), - builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]). - + builtin_reduce(Re, BuiltinReds, KVs, [Distinct | Acc]). builtin_sum_rows([], Acc) -> Acc; @@ -197,11 +208,13 @@ builtin_sum_rows([[_Key, Value] | RestKVs], Acc) -> throw:{builtin_reduce_error, Obj} -> Obj; throw:{invalid_value, Reason, Cause} -> - {[{<<"error">>, <<"builtin_reduce_error">>}, - {<<"reason">>, Reason}, {<<"caused_by">>, Cause}]} + {[ + {<<"error">>, <<"builtin_reduce_error">>}, + {<<"reason">>, Reason}, + {<<"caused_by">>, Cause} + ]} end. 
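Rows reach the builtin reducers as `[[Key, Value], ...]`. Note how the `_count` clauses above differ by phase: on reduce every row counts as one, while on rereduce the partial counts themselves are summed. A sketch using the module-internal builtin_sum_rows/2, shown only for illustration:

```erlang
Rows = [[<<"k1">>, 10], [<<"k2">>, 20]],
%% reduce phase: _count is simply the number of rows
2 = length(Rows),
%% rereduce phase: _count folds the partial counts in the value position
30 = builtin_sum_rows(Rows, 0).
```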
- sum_values(Value, Acc) when is_number(Value), is_number(Acc) -> Acc + Value; sum_values(Value, Acc) when is_list(Value), is_list(Acc) -> @@ -239,12 +252,12 @@ sum_objects(Rest, []) -> sum_arrays([], []) -> []; -sum_arrays([_|_]=Xs, []) -> +sum_arrays([_ | _] = Xs, []) -> Xs; -sum_arrays([], [_|_]=Ys) -> +sum_arrays([], [_ | _] = Ys) -> Ys; -sum_arrays([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) -> - [X+Y | sum_arrays(Xs,Ys)]; +sum_arrays([X | Xs], [Y | Ys]) when is_number(X), is_number(Y) -> + [X + Y | sum_arrays(Xs, Ys)]; sum_arrays(Else, _) -> throw_sum_error(Else). @@ -265,37 +278,42 @@ check_sum_overflow(InSize, OutSize, Sum) -> end. log_sum_overflow(InSize, OutSize) -> - Fmt = "Reduce output must shrink more rapidly: " - "input size: ~b " - "output size: ~b", + Fmt = + "Reduce output must shrink more rapidly: " + "input size: ~b " + "output size: ~b", Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])), couch_log:error(Msg, []), Msg. builtin_stats(_, []) -> {0, 0, 0, 0, 0}; -builtin_stats(_, [[_,First]|Rest]) -> - lists:foldl(fun([_Key, Value], Acc) -> - stat_values(Value, Acc) - end, build_initial_accumulator(First), Rest). +builtin_stats(_, [[_, First] | Rest]) -> + lists:foldl( + fun([_Key, Value], Acc) -> + stat_values(Value, Acc) + end, + build_initial_accumulator(First), + Rest + ). stat_values(Value, Acc) when is_list(Value), is_list(Acc) -> lists:zipwith(fun stat_values/2, Value, Acc); stat_values({PreRed}, Acc) when is_list(PreRed) -> stat_values(unpack_stats({PreRed}), Acc); stat_values(Value, Acc) when is_number(Value) -> - stat_values({Value, 1, Value, Value, Value*Value}, Acc); + stat_values({Value, 1, Value, Value, Value * Value}, Acc); stat_values(Value, Acc) when is_number(Acc) -> - stat_values(Value, {Acc, 1, Acc, Acc, Acc*Acc}); + stat_values(Value, {Acc, 1, Acc, Acc, Acc * Acc}); stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) -> {Sum0, Cnt0, Min0, Max0, Sqr0} = Value, {Sum1, Cnt1, Min1, Max1, Sqr1} = Acc, { - Sum0 + Sum1, - Cnt0 + Cnt1, - erlang:min(Min0, Min1), - erlang:max(Max0, Max1), - Sqr0 + Sqr1 + Sum0 + Sum1, + Cnt0 + Cnt1, + erlang:min(Min0, Min1), + erlang:max(Max0, Max1), + Sqr0 + Sqr1 }; stat_values(Else, _Acc) -> throw_stat_error(Else). @@ -303,7 +321,7 @@ stat_values(Else, _Acc) -> build_initial_accumulator(L) when is_list(L) -> [build_initial_accumulator(X) || X <- L]; build_initial_accumulator(X) when is_number(X) -> - {X, 1, X, X, X*X}; + {X, 1, X, X, X * X}; build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) -> AlreadyUnpacked; build_initial_accumulator({Props}) -> @@ -314,16 +332,21 @@ build_initial_accumulator(Else) -> unpack_stats({PreRed}) when is_list(PreRed) -> { - get_number(<<"sum">>, PreRed), - get_number(<<"count">>, PreRed), - get_number(<<"min">>, PreRed), - get_number(<<"max">>, PreRed), - get_number(<<"sumsqr">>, PreRed) + get_number(<<"sum">>, PreRed), + get_number(<<"count">>, PreRed), + get_number(<<"min">>, PreRed), + get_number(<<"max">>, PreRed), + get_number(<<"sumsqr">>, PreRed) }. 
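The `_stats` accumulator is the 5-tuple `{Sum, Count, Min, Max, SumSqr}`, and stat_values/2 above merges two accumulators field by field: sums and squared sums add, counts add, and min/max take the extremes. A worked example (internal function, shown for illustration):

```erlang
A = {9, 2, 2, 7, 53},  %% accumulated from the values 2 and 7
B = {5, 1, 5, 5, 25},  %% accumulated from the single value 5
{14, 3, 2, 7, 78} = stat_values(A, B).
```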
- pack_stats({Sum, Cnt, Min, Max, Sqr}) -> - {[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]}; + {[ + {<<"sum">>, Sum}, + {<<"count">>, Cnt}, + {<<"min">>, Min}, + {<<"max">>, Max}, + {<<"sumsqr">>, Sqr} + ]}; pack_stats({Packed}) -> % Legacy code path before we had the finalize operation {Packed}; @@ -332,35 +355,43 @@ pack_stats(Stats) when is_list(Stats) -> get_number(Key, Props) -> case couch_util:get_value(Key, Props) of - X when is_number(X) -> - X; - undefined when is_binary(Key) -> - get_number(binary_to_atom(Key, latin1), Props); - undefined -> - Msg = io_lib:format("user _stats input missing required field ~s (~p)", - [Key, Props]), - throw({invalid_value, iolist_to_binary(Msg)}); - Else -> - Msg = io_lib:format("non-numeric _stats input received for ~s: ~w", - [Key, Else]), - throw({invalid_value, iolist_to_binary(Msg)}) + X when is_number(X) -> + X; + undefined when is_binary(Key) -> + get_number(binary_to_atom(Key, latin1), Props); + undefined -> + Msg = io_lib:format( + "user _stats input missing required field ~s (~p)", + [Key, Props] + ), + throw({invalid_value, iolist_to_binary(Msg)}); + Else -> + Msg = io_lib:format( + "non-numeric _stats input received for ~s: ~w", + [Key, Else] + ), + throw({invalid_value, iolist_to_binary(Msg)}) end. % TODO allow customization of precision in the ddoc. approx_count_distinct(reduce, KVs) -> - lists:foldl(fun([[Key, _Id], _Value], Filter) -> - hyper:insert(term_to_binary(Key), Filter) - end, hyper:new(11), KVs); + lists:foldl( + fun([[Key, _Id], _Value], Filter) -> + hyper:insert(term_to_binary(Key), Filter) + end, + hyper:new(11), + KVs + ); approx_count_distinct(rereduce, Reds) -> hyper:union([Filter || [_, Filter] <- Reds]). % use the function stored in ddoc.validate_doc_update to test an update. -spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when - DDoc :: ddoc(), + DDoc :: ddoc(), EditDoc :: doc(), DiskDoc :: doc() | nil, - Ctx :: user_ctx(), - SecObj :: sec_obj(). + Ctx :: user_ctx(), + SecObj :: sec_obj(). validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]), @@ -370,8 +401,9 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> [<<"validate_doc_update">>], [JsonEditDoc, JsonDiskDoc, Ctx, SecObj] ), - if Resp == 1 -> ok; true -> - couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1) + if + Resp == 1 -> ok; + true -> couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1) end, case Resp of RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true -> @@ -386,11 +418,15 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> throw({unknown_error, Message}) end. 
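`_approx_count_distinct` keeps a HyperLogLog filter per reduction: reduce inserts each key into a precision-11 filter, rereduce unions filters, and finalize/2 reads the cardinality. A sketch restricted to the hyper calls already used above:

```erlang
F0 = hyper:new(11),
F1 = hyper:insert(term_to_binary(<<"a">>), F0),
F2 = hyper:insert(term_to_binary(<<"a">>), F1),  %% duplicates collapse
F3 = hyper:insert(term_to_binary(<<"b">>), F2),
Approx = round(hyper:card(hyper:union([F3]))).   %% ~2 for a set this small
```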
- rewrite(Req, Db, DDoc) -> - Fields = [F || F <- chttpd_external:json_req_obj_fields(), - F =/= <<"info">>, F =/= <<"form">>, - F =/= <<"uuid">>, F =/= <<"id">>], + Fields = [ + F + || F <- chttpd_external:json_req_obj_fields(), + F =/= <<"info">>, + F =/= <<"form">>, + F =/= <<"uuid">>, + F =/= <<"id">> + ], JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields), case couch_query_servers:ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of {[{<<"forbidden">>, Message}]} -> @@ -399,10 +435,10 @@ rewrite(Req, Db, DDoc) -> throw({unauthorized, Message}); [<<"no_dispatch_rule">>] -> undefined; - [<<"ok">>, {V}=Rewrite] when is_list(V) -> + [<<"ok">>, {V} = Rewrite] when is_list(V) -> ok = validate_rewrite_response(Rewrite), Rewrite; - [<<"ok">>, _] -> + [<<"ok">>, _] -> throw_rewrite_error(<<"bad rewrite">>); V -> couch_log:error("bad rewrite return ~p", [V]), @@ -430,15 +466,17 @@ validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) -> ok; validate_rewrite_response_field(<<"body">>, _) -> throw_rewrite_error(<<"bad body">>); -validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) -> +validate_rewrite_response_field(<<"headers">>, {Props} = Headers) when is_list(Props) -> validate_object_fields(Headers); validate_rewrite_response_field(<<"headers">>, _) -> throw_rewrite_error(<<"bad headers">>); -validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) -> +validate_rewrite_response_field(<<"query">>, {Props} = Query) when is_list(Props) -> validate_object_fields(Query); validate_rewrite_response_field(<<"query">>, _) -> throw_rewrite_error(<<"bad query">>); -validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 -> +validate_rewrite_response_field(<<"code">>, Code) when + is_integer(Code) andalso Code >= 200 andalso Code < 600 +-> ok; validate_rewrite_response_field(<<"code">>, _) -> throw_rewrite_error(<<"bad code">>); @@ -447,24 +485,26 @@ validate_rewrite_response_field(K, V) -> ok. validate_object_fields({Props}) when is_list(Props) -> - lists:foreach(fun - ({Key, Value}) when is_binary(Key) andalso is_binary(Value) -> - ok; - ({Key, Value}) -> - Reason = io_lib:format( - "object key/value must be strings ~p=~p", [Key, Value]), - throw_rewrite_error(Reason); - (Value) -> - throw_rewrite_error(io_lib:format("bad value ~p", [Value])) - end, Props). - + lists:foreach( + fun + ({Key, Value}) when is_binary(Key) andalso is_binary(Value) -> + ok; + ({Key, Value}) -> + Reason = io_lib:format( + "object key/value must be strings ~p=~p", [Key, Value] + ), + throw_rewrite_error(Reason); + (Value) -> + throw_rewrite_error(io_lib:format("bad value ~p", [Value])) + end, + Props + ). -throw_rewrite_error(Reason) when is_list(Reason)-> +throw_rewrite_error(Reason) when is_list(Reason) -> throw_rewrite_error(iolist_to_binary(Reason)); throw_rewrite_error(Reason) when is_binary(Reason) -> throw({rewrite_error, Reason}). - json_doc_options() -> json_doc_options([]). @@ -487,18 +527,19 @@ filter_view(DDoc, VName, Docs) -> {ok, Passes}. 
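Putting the validators above together, a rewrite answer that passes them looks like the sketch below: `"code"` must be an integer in `[200, 600)`, `"body"` a binary, and every value inside the `"headers"` and `"query"` objects a string. The `"path"` field here is hypothetical; it is simply not one of the checked keys and falls through the catch-all clause.

```erlang
Rewrite =
    {[
        {<<"path">>, <<"_view/by_tag">>},
        {<<"query">>, {[{<<"key">>, <<"\"erlang\"">>}]}},
        {<<"headers">>, {[{<<"X-Rewritten">>, <<"true">>}]}},
        {<<"code">>, 302}
    ]},
ok = validate_rewrite_response(Rewrite).
```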
filter_docs(Req, Db, DDoc, FName, Docs) -> - JsonReq = case Req of - {json_req, JsonObj} -> - JsonObj; - #httpd{} = HttpReq -> - chttpd_external:json_req_obj(HttpReq, Db) - end, + JsonReq = + case Req of + {json_req, JsonObj} -> + JsonObj; + #httpd{} = HttpReq -> + chttpd_external:json_req_obj(HttpReq, Db) + end, Options = json_doc_options(), JsonDocs = [json_doc(Doc, Options) || Doc <- Docs], try {ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)} catch - throw:{os_process_error,{exit_status,1}} -> + throw:{os_process_error, {exit_status, 1}} -> %% batch used too much memory, retry sequentially. Fun = fun(JsonDoc) -> filter_docs_int(DDoc, FName, JsonReq, [JsonDoc]) @@ -507,8 +548,11 @@ filter_docs(Req, Db, DDoc, FName, Docs) -> end. filter_docs_int(DDoc, FName, JsonReq, JsonDocs) -> - [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName], - [JsonDocs, JsonReq]), + [true, Passes] = ddoc_prompt( + DDoc, + [<<"filters">>, FName], + [JsonDocs, JsonReq] + ), Passes. ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) -> @@ -533,12 +577,12 @@ with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) -> end. proc_prompt(Proc, Args) -> - case proc_prompt_raw(Proc, Args) of - {json, Json} -> - raw_to_ejson({json, Json}); - EJson -> - EJson - end. + case proc_prompt_raw(Proc, Args) of + {json, Json} -> + raw_to_ejson({json, Json}); + EJson -> + EJson + end. proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) -> apply(Mod, Func, [Proc#proc.pid, Args]). @@ -546,13 +590,16 @@ proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) -> raw_to_ejson({json, Json}) -> try ?JSON_DECODE(Json) - catch throw:{invalid_json, {_, invalid_string}} -> - Forced = try - force_utf8(Json) - catch _:_ -> - Json - end, - ?JSON_DECODE(Forced) + catch + throw:{invalid_json, {_, invalid_string}} -> + Forced = + try + force_utf8(Json) + catch + _:_ -> + Json + end, + ?JSON_DECODE(Forced) end; raw_to_ejson(EJson) -> EJson. 
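The batch-then-retry shape of filter_docs/5 above, reduced to its core: prompt once with the whole batch, and if the OS process exits with status 1 (typically out of memory) redo the documents one at a time. The `lists:flatmap` below is an assumption about the elided fallback line, not a quote from it.

```erlang
try
    {ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)}
catch
    throw:{os_process_error, {exit_status, 1}} ->
        %% assumed fallback: one prompt per document
        {ok, lists:flatmap(
                 fun(D) -> filter_docs_int(DDoc, FName, JsonReq, [D]) end,
                 JsonDocs)}
end.
```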
@@ -561,14 +608,15 @@ force_utf8(Bin) -> case binary:match(Bin, <<"\\u">>) of {Start, 2} -> <<Prefix:Start/binary, Rest1/binary>> = Bin, - {Insert, Rest3} = case check_uescape(Rest1) of - {ok, Skip} -> - <<Skipped:Skip/binary, Rest2/binary>> = Rest1, - {Skipped, Rest2}; - {error, Skip} -> - <<_:Skip/binary, Rest2/binary>> = Rest1, - {<<16#EF, 16#BF, 16#BD>>, Rest2} - end, + {Insert, Rest3} = + case check_uescape(Rest1) of + {ok, Skip} -> + <<Skipped:Skip/binary, Rest2/binary>> = Rest1, + {Skipped, Rest2}; + {error, Skip} -> + <<_:Skip/binary, Rest2/binary>> = Rest1, + {<<16#EF, 16#BF, 16#BD>>, Rest2} + end, RestForced = force_utf8(Rest3), <<Prefix/binary, Insert/binary, RestForced/binary>>; nomatch -> @@ -588,8 +636,9 @@ check_uescape(Data) -> try [_] = xmerl_ucs:from_utf16be(UTF16), {ok, 12} - catch _:_ -> - {error, 6} + catch + _:_ -> + {error, 6} end; {_, _} -> % Found a uescape that's not a low half @@ -628,33 +677,33 @@ get_os_process_timeout() -> get_ddoc_process(#doc{} = DDoc, DDocKey) -> % remove this case statement case gen_server:call(couch_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout()) of - {ok, Proc, {QueryConfig}} -> - % process knows the ddoc - case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of - true -> - proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)), - Proc; - _ -> - catch proc_stop(Proc), - get_ddoc_process(DDoc, DDocKey) - end; - Error -> - throw(Error) + {ok, Proc, {QueryConfig}} -> + % process knows the ddoc + case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of + true -> + proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)), + Proc; + _ -> + catch proc_stop(Proc), + get_ddoc_process(DDoc, DDocKey) + end; + Error -> + throw(Error) end. get_os_process(Lang) -> case gen_server:call(couch_proc_manager, {get_proc, Lang}, get_os_process_timeout()) of - {ok, Proc, {QueryConfig}} -> - case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of - true -> - proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)), - Proc; - _ -> - catch proc_stop(Proc), - get_os_process(Lang) - end; - Error -> - throw(Error) + {ok, Proc, {QueryConfig}} -> + case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of + true -> + proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)), + Proc; + _ -> + catch proc_stop(Proc), + get_os_process(Lang) + end; + Error -> + throw(Error) end. ret_os_process(Proc) -> @@ -668,7 +717,6 @@ throw_sum_error(Else) -> throw_stat_error(Else) -> throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}). - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -680,19 +728,38 @@ builtin_sum_rows_negative_test() -> % it's only one document. ?assertEqual(A, builtin_sum_rows([["K", A]], [])), {Result} = builtin_sum_rows([["K", A]], [1, 2, 3]), - ?assertEqual({<<"error">>, <<"builtin_reduce_error">>}, - lists:keyfind(<<"error">>, 1, Result)). + ?assertEqual( + {<<"error">>, <<"builtin_reduce_error">>}, + lists:keyfind(<<"error">>, 1, Result) + ). 
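What check_uescape/1 above actually decides, by example: a well-formed surrogate pair is consumed whole (`{ok, 12}`) and survives force_utf8/1 untouched, while a lone half yields `{error, 6}` and the six escape bytes are replaced by U+FFFD, the `<<16#EF, 16#BF, 16#BD>>` spliced in above. A sketch:

```erlang
%% valid pair (U+1F600): kept as-is
Kept = force_utf8(<<"\\uD83D\\uDE00">>),
%% lone low surrogate: replaced by the replacement character
<<16#EF, 16#BF, 16#BD>> = force_utf8(<<"\\uDCA5">>).
```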
sum_values_test() -> ?assertEqual(3, sum_values(1, 2)), - ?assertEqual([2,4,6], sum_values(1, [1,4,6])), - ?assertEqual([3,5,7], sum_values([3,2,4], [0,3,3])), - X = {[{<<"a">>,1}, {<<"b">>,[1,2]}, {<<"c">>, {[{<<"d">>,3}]}}, - {<<"g">>,1}]}, - Y = {[{<<"a">>,2}, {<<"b">>,3}, {<<"c">>, {[{<<"e">>, 5}]}}, - {<<"f">>,1}, {<<"g">>,1}]}, - Z = {[{<<"a">>,3}, {<<"b">>,[4,2]}, {<<"c">>, {[{<<"d">>,3},{<<"e">>,5}]}}, - {<<"f">>,1}, {<<"g">>,2}]}, + ?assertEqual([2, 4, 6], sum_values(1, [1, 4, 6])), + ?assertEqual([3, 5, 7], sum_values([3, 2, 4], [0, 3, 3])), + X = + {[ + {<<"a">>, 1}, + {<<"b">>, [1, 2]}, + {<<"c">>, {[{<<"d">>, 3}]}}, + {<<"g">>, 1} + ]}, + Y = + {[ + {<<"a">>, 2}, + {<<"b">>, 3}, + {<<"c">>, {[{<<"e">>, 5}]}}, + {<<"f">>, 1}, + {<<"g">>, 1} + ]}, + Z = + {[ + {<<"a">>, 3}, + {<<"b">>, [4, 2]}, + {<<"c">>, {[{<<"d">>, 3}, {<<"e">>, 5}]}}, + {<<"f">>, 1}, + {<<"g">>, 2} + ]}, ?assertEqual(Z, sum_values(X, Y)), ?assertEqual(Z, sum_values(Y, X)). @@ -701,8 +768,12 @@ sum_values_negative_test() -> A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}], B = ["error 1", "error 2"], C = [<<"error 3">>, <<"error 4">>], - KV = {[{<<"error">>, <<"builtin_reduce_error">>}, - {<<"reason">>, ?SUMERROR}, {<<"caused_by">>, <<"some cause">>}]}, + KV = + {[ + {<<"error">>, <<"builtin_reduce_error">>}, + {<<"reason">>, ?SUMERROR}, + {<<"caused_by">>, <<"some cause">>} + ]}, ?assertThrow({invalid_value, _, _}, sum_values(A, [1, 2, 3])), ?assertThrow({invalid_value, _, _}, sum_values(A, 0)), ?assertThrow({invalid_value, _, _}, sum_values(B, [1, 2])), @@ -712,48 +783,103 @@ sum_values_negative_test() -> stat_values_test() -> ?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)), ?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)), - ?assertEqual([{9, 2, 2, 7, 53}, - {14, 2, 3, 11, 130}, - {18, 2, 5, 13, 194} - ], stat_values([2,3,5], [7,11,13])). + ?assertEqual( + [ + {9, 2, 2, 7, 53}, + {14, 2, 3, 11, 130}, + {18, 2, 5, 13, 194} + ], + stat_values([2, 3, 5], [7, 11, 13]) + ). 
reduce_stats_test() -> - ?assertEqual([ - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - ], test_reduce(<<"_stats">>, [[[null, key], 2]])), + ?assertEqual( + [ + {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]} + ], + test_reduce(<<"_stats">>, [[[null, key], 2]]) + ), - ?assertEqual([[ - {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]}, - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - ]], test_reduce(<<"_stats">>, [[[null, key],[1,2]]])), + ?assertEqual( + [ + [ + {[ + {<<"sum">>, 1}, + {<<"count">>, 1}, + {<<"min">>, 1}, + {<<"max">>, 1}, + {<<"sumsqr">>, 1} + ]}, + {[ + {<<"sum">>, 2}, + {<<"count">>, 1}, + {<<"min">>, 2}, + {<<"max">>, 2}, + {<<"sumsqr">>, 4} + ]} + ] + ], + test_reduce(<<"_stats">>, [[[null, key], [1, 2]]]) + ), + + ?assertEqual( + {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}, + element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4})) + ), ?assertEqual( - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - , element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))), - - ?assertEqual([ - {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]}, - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - ], element(2, finalize(<<"_stats">>, [ - {1, 1, 1, 1, 1}, - {2, 1, 2, 2, 4} - ]))), - - ?assertEqual([ - {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]}, - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - ], element(2, finalize(<<"_stats">>, [ - {1, 1, 1, 1, 1}, - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - ]))), - - ?assertEqual([ - {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]}, - {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]} - ], element(2, finalize(<<"_stats">>, [ - {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]}, - {2, 1, 2, 2, 4} - ]))), + [ + {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]}, + {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]} + ], + element( + 2, + finalize(<<"_stats">>, [ + {1, 1, 1, 1, 1}, + {2, 1, 2, 2, 4} + ]) + ) + ), + + ?assertEqual( + [ + {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]}, + {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]} + ], + element( + 2, + finalize(<<"_stats">>, [ + {1, 1, 1, 1, 1}, + {[ + {<<"sum">>, 2}, + {<<"count">>, 1}, + {<<"min">>, 2}, + {<<"max">>, 2}, + {<<"sumsqr">>, 4} + ]} + ]) + ) + ), + + ?assertEqual( + [ + {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]}, + {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]} + ], + element( + 2, + finalize(<<"_stats">>, [ + {[ + {<<"sum">>, 1}, + {<<"count">>, 1}, + {<<"min">>, 1}, + {<<"max">>, 1}, + {<<"sumsqr">>, 1} + ]}, + {2, 1, 2, 2, 4} + ]) + ) + ), ok. 
test_reduce(Reducer, KVs) -> @@ -773,9 +899,12 @@ force_utf8_test() -> % Truncated but we doesn't break replacements <<"\\u0FA">> ], - lists:foreach(fun(Case) -> - ?assertEqual(Case, force_utf8(Case)) - end, Ok), + lists:foreach( + fun(Case) -> + ?assertEqual(Case, force_utf8(Case)) + end, + Ok + ), NotOk = [ <<"\\uDCA5">>, @@ -788,15 +917,18 @@ force_utf8_test() -> <<"\\uD83D\\u00A0">> ], ToJSON = fun(Bin) -> <<34, Bin/binary, 34>> end, - lists:foreach(fun(Case) -> - try - ?assertNotEqual(Case, force_utf8(Case)), - ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))), - ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case)))) - catch - T:R -> - io:format(standard_error, "~p~n~p~n", [T, R]) - end - end, NotOk). + lists:foreach( + fun(Case) -> + try + ?assertNotEqual(Case, force_utf8(Case)), + ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))), + ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case)))) + catch + T:R -> + io:format(standard_error, "~p~n~p~n", [T, R]) + end + end, + NotOk + ). -endif. diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl index 67454b8ad..bc30956a4 100644 --- a/src/couch/src/couch_rand.erl +++ b/src/couch/src/couch_rand.erl @@ -12,16 +12,13 @@ -module(couch_rand). - -export([ uniform/0, uniform/1 ]). - uniform() -> rand:uniform(). - uniform(N) -> rand:uniform(N). diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl index bb7821555..a328c170e 100644 --- a/src/couch/src/couch_secondary_sup.erl +++ b/src/couch/src/couch_secondary_sup.erl @@ -15,16 +15,12 @@ -export([init/1, start_link/0]). start_link() -> - supervisor:start_link({local,couch_secondary_services}, ?MODULE, []). + supervisor:start_link({local, couch_secondary_services}, ?MODULE, []). init([]) -> SecondarySupervisors = [ - {couch_plugin_event, - {gen_event, start_link, [{local, couch_plugin}]}, - permanent, - brutal_kill, - worker, - dynamic} + {couch_plugin_event, {gen_event, start_link, [{local, couch_plugin}]}, permanent, + brutal_kill, worker, dynamic} ], Daemons = [ {index_server, {couch_index_server, start_link, []}}, @@ -33,31 +29,34 @@ init([]) -> {uuids, {couch_uuids, start, []}} ], - MaybeHttp = case http_enabled() of - true -> [{httpd, {couch_httpd, start_link, []}}]; - false -> couch_httpd:set_auth_handlers(), [] - end, + MaybeHttp = + case http_enabled() of + true -> + [{httpd, {couch_httpd, start_link, []}}]; + false -> + couch_httpd:set_auth_handlers(), + [] + end, - MaybeHttps = case https_enabled() of - true -> [{httpsd, {chttpd, start_link, [https]}}]; - false -> [] - end, + MaybeHttps = + case https_enabled() of + true -> [{httpsd, {chttpd, start_link, [https]}}]; + false -> [] + end, - Children = SecondarySupervisors ++ [ - begin - {Module, Fun, Args} = Spec, + Children = + SecondarySupervisors ++ + [ + begin + {Module, Fun, Args} = Spec, - {Name, - {Module, Fun, Args}, - permanent, - brutal_kill, - worker, - [Module]} - end - || {Name, Spec} - <- Daemons ++ MaybeHttp ++ MaybeHttps, Spec /= ""], - {ok, {{one_for_one, 50, 3600}, - couch_epi:register_service(couch_db_epi, Children)}}. + {Name, {Module, Fun, Args}, permanent, brutal_kill, worker, [Module]} + end + || {Name, Spec} <- + Daemons ++ MaybeHttp ++ MaybeHttps, + Spec /= "" + ], + {ok, {{one_for_one, 50, 3600}, couch_epi:register_service(couch_db_epi, Children)}}. http_enabled() -> config:get_boolean("httpd", "enable", false). 
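Each child assembled above ends up in the same old-style supervisor child spec shape; expanded for one of the daemons it reads:

```erlang
{index_server,
    {couch_index_server, start_link, []},
    permanent, brutal_kill, worker,
    [couch_index_server]}.
```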
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl index 3c72e3357..06be86786 100644 --- a/src/couch/src/couch_server.erl +++ b/src/couch/src/couch_server.erl @@ -15,11 +15,11 @@ -behaviour(config_listener). -vsn(3). --export([open/2,create/2,delete/2,get_version/0,get_version/1,get_git_sha/0,get_uuid/0]). +-export([open/2, create/2, delete/2, get_version/0, get_version/1, get_git_sha/0, get_uuid/0]). -export([all_databases/0, all_databases/2]). --export([init/1, handle_call/3,sup_start_link/1]). --export([handle_cast/2,code_change/3,handle_info/2,terminate/2]). --export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]). +-export([init/1, handle_call/3, sup_start_link/1]). +-export([handle_cast/2, code_change/3, handle_info/2, terminate/2]). +-export([dev_start/0, is_admin/2, has_admins/0, get_stats/0]). -export([close_db_if_idle/1]). -export([delete_compaction_files/1]). -export([exists/1]). @@ -28,7 +28,7 @@ -export([lock/2, unlock/1]). -export([db_updated/1]). -export([num_servers/0, couch_server/1, couch_dbs_pid_to_name/1, couch_dbs/1]). --export([aggregate_queue_len/0,get_spidermonkey_version/0]). +-export([aggregate_queue_len/0, get_spidermonkey_version/0]). % config_listener api -export([handle_config_change/5, handle_config_terminate/3]). @@ -40,18 +40,18 @@ -define(RELISTEN_DELAY, 5000). -define(DEFAULT_ENGINE, "couch"). --record(server,{ +-record(server, { root_dir = [], engines = [], - max_dbs_open=?MAX_DBS_OPEN, - dbs_open=0, - start_time="", - update_lru_on_read=true, + max_dbs_open = ?MAX_DBS_OPEN, + dbs_open = 0, + start_time = "", + update_lru_on_read = true, lru = couch_lru:new(), couch_dbs, couch_dbs_pid_to_name, couch_dbs_locks - }). +}). dev_start() -> couch:stop(), @@ -59,11 +59,12 @@ dev_start() -> couch:start(). get_version() -> - ?COUCHDB_VERSION. %% Defined in rebar.config.script + %% Defined in rebar.config.script + ?COUCHDB_VERSION. get_version(short) -> - %% strip git hash from version string - [Version|_Rest] = string:tokens(get_version(), "+"), - Version. + %% strip git hash from version string + [Version | _Rest] = string:tokens(get_version(), "+"), + Version. get_git_sha() -> ?COUCHDB_GIT_SHA. @@ -73,16 +74,18 @@ get_uuid() -> UUID = couch_uuids:random(), config:set("couchdb", "uuid", ?b2l(UUID)), UUID; - UUID -> ?l2b(UUID) + UUID -> + ?l2b(UUID) end. get_stats() -> Fun = fun(N, {TimeAcc, OpenAcc}) -> - {ok, #server{start_time=Time,dbs_open=Open}} = + {ok, #server{start_time = Time, dbs_open = Open}} = gen_server:call(couch_server(N), get_server), - {max(Time, TimeAcc), Open + OpenAcc} end, + {max(Time, TimeAcc), Open + OpenAcc} + end, {Time, Open} = - lists:foldl(Fun, {0, 0}, lists:seq(1, num_servers())), + lists:foldl(Fun, {0, 0}, lists:seq(1, num_servers())), [{start_time, ?l2b(Time)}, {dbs_open, Open}]. get_spidermonkey_version() -> list_to_binary(?COUCHDB_SPIDERMONKEY_VERSION). @@ -94,31 +97,32 @@ open(DbName, Options) -> try validate_open_or_create(DbName, Options), open_int(DbName, Options) - catch throw:{?MODULE, Error} -> - Error + catch + throw:{?MODULE, Error} -> + Error end. 
open_int(DbName, Options0) -> Ctx = couch_util:get_value(user_ctx, Options0, #user_ctx{}), case ets:lookup(couch_dbs(DbName), DbName) of - [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked -> - update_lru(DbName, Entry#entry.db_options), - {ok, Db1} = couch_db:incref(Db0), - couch_db:set_user_ctx(Db1, Ctx); - _ -> - Options = maybe_add_sys_db_callbacks(DbName, Options0), - Timeout = couch_util:get_value(timeout, Options, infinity), - Create = couch_util:get_value(create_if_missing, Options, false), - case gen_server:call(couch_server(DbName), {open, DbName, Options}, Timeout) of - {ok, Db0} -> + [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked -> + update_lru(DbName, Entry#entry.db_options), {ok, Db1} = couch_db:incref(Db0), couch_db:set_user_ctx(Db1, Ctx); - {not_found, no_db_file} when Create -> - couch_log:warning("creating missing database: ~s", [DbName]), - couch_server:create(DbName, Options); - Error -> - Error - end + _ -> + Options = maybe_add_sys_db_callbacks(DbName, Options0), + Timeout = couch_util:get_value(timeout, Options, infinity), + Create = couch_util:get_value(create_if_missing, Options, false), + case gen_server:call(couch_server(DbName), {open, DbName, Options}, Timeout) of + {ok, Db0} -> + {ok, Db1} = couch_db:incref(Db0), + couch_db:set_user_ctx(Db1, Ctx); + {not_found, no_db_file} when Create -> + couch_log:warning("creating missing database: ~s", [DbName]), + couch_server:create(DbName, Options); + Error -> + Error + end end. update_lru(DbName, Options) -> @@ -132,47 +136,48 @@ update_lru(DbName, Options) -> ok end. - create(DbName, Options) -> try validate_open_or_create(DbName, Options), create_int(DbName, Options) - catch throw:{?MODULE, Error} -> - Error + catch + throw:{?MODULE, Error} -> + Error end. create_int(DbName, Options0) -> Options = maybe_add_sys_db_callbacks(DbName, Options0), couch_partition:validate_dbname(DbName, Options), case gen_server:call(couch_server(DbName), {create, DbName, Options}, infinity) of - {ok, Db0} -> - Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}), - {ok, Db1} = couch_db:incref(Db0), - couch_db:set_user_ctx(Db1, Ctx); - Error -> - Error + {ok, Db0} -> + Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}), + {ok, Db1} = couch_db:incref(Db0), + couch_db:set_user_ctx(Db1, Ctx); + Error -> + Error end. delete(DbName, Options) -> gen_server:call(couch_server(DbName), {delete, DbName, Options}, infinity). - exists(DbName) -> RootDir = config:get("couchdb", "database_dir", "."), Engines = get_configured_engines(), Possible = get_possible_engines(DbName, RootDir, Engines), Possible /= []. - delete_compaction_files(DbName) -> delete_compaction_files(DbName, []). delete_compaction_files(DbName, DelOpts) when is_list(DbName) -> RootDir = config:get("couchdb", "database_dir", "."), - lists:foreach(fun({Ext, Engine}) -> - FPath = make_filepath(RootDir, DbName, Ext), - couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts) - end, get_configured_engines()), + lists:foreach( + fun({Ext, Engine}) -> + FPath = make_filepath(RootDir, DbName, Ext), + couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts) + end, + get_configured_engines() + ), ok; delete_compaction_files(DbName, DelOpts) when is_binary(DbName) -> delete_compaction_files(?b2l(DbName), DelOpts). 
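Note the create_if_missing option threaded through open_int/2 above: a `{not_found, no_db_file}` result is logged as a warning and converted into a couch_server:create/2 call, so callers can treat open-or-create as one operation. Sketch:

```erlang
%% opens "newdb" if present, otherwise creates it
{ok, Db} = couch_server:open(<<"newdb">>, [{create_if_missing, true}]).
```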
@@ -185,22 +190,32 @@ maybe_add_sys_db_callbacks(DbName, Options) -> IsReplicatorDb = path_ends_with(DbName, "_replicator"), UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"), - IsUsersDb = path_ends_with(DbName, "_users") - orelse path_ends_with(DbName, UsersDbSuffix), + IsUsersDb = + path_ends_with(DbName, "_users") orelse + path_ends_with(DbName, UsersDbSuffix), if DbName == DbsDbName -> - [{before_doc_update, fun mem3_bdu:before_doc_update/3}, - sys_db | Options]; + [ + {before_doc_update, fun mem3_bdu:before_doc_update/3}, + sys_db + | Options + ]; DbName == NodesDbName -> [sys_db | Options]; IsReplicatorDb -> - [{before_doc_update, fun couch_replicator_docs:before_doc_update/3}, - {after_doc_read, fun couch_replicator_docs:after_doc_read/2}, - sys_db | Options]; + [ + {before_doc_update, fun couch_replicator_docs:before_doc_update/3}, + {after_doc_read, fun couch_replicator_docs:after_doc_read/2}, + sys_db + | Options + ]; IsUsersDb -> - [{before_doc_update, fun couch_users_db:before_doc_update/3}, - {after_doc_read, fun couch_users_db:after_doc_read/2}, - sys_db | Options]; + [ + {before_doc_update, fun couch_users_db:before_doc_update/3}, + {after_doc_read, fun couch_users_db:after_doc_read/2}, + sys_db + | Options + ]; true -> Options end. @@ -215,11 +230,11 @@ check_dbname(DbName) -> is_admin(User, ClearPwd) -> case config:get("admins", User) of - "-hashed-" ++ HashedPwdAndSalt -> - [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","), - couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd; - _Else -> - false + "-hashed-" ++ HashedPwdAndSalt -> + [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","), + couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd; + _Else -> + false end. has_admins() -> @@ -233,7 +248,9 @@ hash_admin_passwords(Persist) -> fun({User, ClearPassword}) -> HashedPassword = couch_passwords:hash_admin_password(ClearPassword), config:set("admins", User, ?b2l(HashedPassword), Persist) - end, couch_passwords:get_unhashed_admins()). + end, + couch_passwords:get_unhashed_admins() + ). close_db_if_idle(DbName) -> case ets:lookup(couch_dbs(DbName), DbName) of @@ -243,7 +260,6 @@ close_db_if_idle(DbName) -> ok end. - init([N]) -> couch_util:set_mqd_off_heap(?MODULE), couch_util:set_process_priority(?MODULE, high), @@ -260,7 +276,8 @@ init([N]) -> % Mark if fips is enabled case erlang:function_exported(crypto, info_fips, 0) andalso - crypto:info_fips() == enabled of + crypto:info_fips() == enabled + of true -> config:enable_feature('fips'); false -> @@ -276,7 +293,8 @@ init([N]) -> Engines = get_configured_engines(), MaxDbsOpen = config:get_integer("couchdb", "max_dbs_open", ?MAX_DBS_OPEN), UpdateLruOnRead = config:get_boolean( - "couchdb", "update_lru_on_read", false), + "couchdb", "update_lru_on_read", false + ), ok = config:listen_for_changes(?MODULE, N), ok = couch_file:init_delete_dir(RootDir), hash_admin_passwords(), @@ -295,44 +313,55 @@ init([N]) -> {read_concurrency, true} ]), process_flag(trap_exit, true), - {ok, #server{root_dir=RootDir, - engines = Engines, - max_dbs_open=per_couch_server(MaxDbsOpen), - update_lru_on_read=UpdateLruOnRead, - start_time=couch_util:rfc1123_date(), - couch_dbs=couch_dbs(N), - couch_dbs_pid_to_name=couch_dbs_pid_to_name(N), - couch_dbs_locks=couch_dbs_locks(N)}}. 
+ {ok, #server{ + root_dir = RootDir, + engines = Engines, + max_dbs_open = per_couch_server(MaxDbsOpen), + update_lru_on_read = UpdateLruOnRead, + start_time = couch_util:rfc1123_date(), + couch_dbs = couch_dbs(N), + couch_dbs_pid_to_name = couch_dbs_pid_to_name(N), + couch_dbs_locks = couch_dbs_locks(N) + }}. terminate(Reason, Srv) -> - couch_log:error("couch_server terminating with ~p, state ~2048p", - [Reason, - Srv#server{lru = redacted}]), - ets:foldl(fun(#entry{db = Db}, _) -> - % Filter out any entry records for open_async - % processes that haven't finished. - if Db == undefined -> ok; true -> - couch_util:shutdown_sync(couch_db:get_pid(Db)) - end - end, nil, couch_dbs(Srv)), + couch_log:error( + "couch_server terminating with ~p, state ~2048p", + [ + Reason, + Srv#server{lru = redacted} + ] + ), + ets:foldl( + fun(#entry{db = Db}, _) -> + % Filter out any entry records for open_async + % processes that haven't finished. + if + Db == undefined -> ok; + true -> couch_util:shutdown_sync(couch_db:get_pid(Db)) + end + end, + nil, + couch_dbs(Srv) + ), ok. handle_config_change("couchdb", "database_dir", _, _, _) -> exit(whereis(couch_server), config_change), remove_handler; handle_config_change("couchdb", "update_lru_on_read", "true", _, N) -> - gen_server:call(couch_server(N),{set_update_lru_on_read,true}), + gen_server:call(couch_server(N), {set_update_lru_on_read, true}), {ok, N}; handle_config_change("couchdb", "update_lru_on_read", _, _, N) -> - gen_server:call(couch_server(N),{set_update_lru_on_read,false}), + gen_server:call(couch_server(N), {set_update_lru_on_read, false}), {ok, N}; handle_config_change("couchdb", "max_dbs_open", Max0, _, N) when is_list(Max0) -> Max1 = per_couch_server(list_to_integer(Max0)), - gen_server:call(couch_server(N),{set_max_dbs_open,Max1}), + gen_server:call(couch_server(N), {set_max_dbs_open, Max1}), {ok, N}; handle_config_change("couchdb", "max_dbs_open", _, _, N) -> Max = per_couch_server(?MAX_DBS_OPEN), - gen_server:call(couch_server(N),{set_max_dbs_open,Max}), + gen_server:call(couch_server(N), {set_max_dbs_open, Max}), {ok, N}; handle_config_change("couchdb_engines", _, _, _, N) -> gen_server:call(couch_server(N), reload_engines), @@ -361,57 +390,64 @@ handle_config_terminate(_, stop, _) -> handle_config_terminate(_Server, _Reason, N) -> erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), {restart_config_listener, N}). - per_couch_server(X) -> erlang:max(1, X div num_servers()). - all_databases() -> {ok, DbList} = all_databases( - fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []), + fun(DbName, Acc) -> {ok, [DbName | Acc]} end, [] + ), {ok, lists:usort(DbList)}. all_databases(Fun, Acc0) -> - {ok, #server{root_dir=Root}} = gen_server:call(couch_server_1, get_server), + {ok, #server{root_dir = Root}} = gen_server:call(couch_server_1, get_server), NormRoot = couch_util:normpath(Root), Extensions = get_engine_extensions(), ExtRegExp = "(" ++ string:join(Extensions, "|") ++ ")", RegExp = - "^[a-z0-9\\_\\$()\\+\\-]*" % stock CouchDB name regex - "(\\.[0-9]{10,})?" % optional shard timestamp - "\\." ++ ExtRegExp ++ "$", % filename extension - FinalAcc = try - couch_util:fold_files(Root, - RegExp, - true, - fun(Filename, AccIn) -> - NormFilename = couch_util:normpath(Filename), - case NormFilename -- NormRoot of - [$/ | RelativeFilename] -> ok; - RelativeFilename -> ok + % stock CouchDB name regex + "^[a-z0-9\\_\\$()\\+\\-]*" + % optional shard timestamp + "(\\.[0-9]{10,})?" + % filename extension + "\\." 
++ ExtRegExp ++ "$", + FinalAcc = + try + couch_util:fold_files( + Root, + RegExp, + true, + fun(Filename, AccIn) -> + NormFilename = couch_util:normpath(Filename), + case NormFilename -- NormRoot of + [$/ | RelativeFilename] -> ok; + RelativeFilename -> ok + end, + Ext = filename:extension(RelativeFilename), + case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of + {ok, NewAcc} -> NewAcc; + {stop, NewAcc} -> throw({stop, Fun, NewAcc}) + end end, - Ext = filename:extension(RelativeFilename), - case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of - {ok, NewAcc} -> NewAcc; - {stop, NewAcc} -> throw({stop, Fun, NewAcc}) - end - end, Acc0) - catch throw:{stop, Fun, Acc1} -> - Acc1 - end, + Acc0 + ) + catch + throw:{stop, Fun, Acc1} -> + Acc1 + end, {ok, FinalAcc}. - make_room(Server, Options) -> case lists:member(sys_db, Options) of false -> maybe_close_lru_db(Server); true -> {ok, Server} end. -maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server) - when NumOpen < MaxOpen -> +maybe_close_lru_db(#server{dbs_open = NumOpen, max_dbs_open = MaxOpen} = Server) when + NumOpen < MaxOpen +-> {ok, Server}; -maybe_close_lru_db(#server{lru=Lru}=Server) -> +maybe_close_lru_db(#server{lru = Lru} = Server) -> case couch_lru:close(Lru) of {true, NewLru} -> {ok, db_closed(Server#server{lru = NewLru}, [])}; @@ -427,10 +463,11 @@ open_async(Server, From, DbName, Options) -> T0 = os:timestamp(), Opener = spawn_link(fun() -> Res = open_async_int(NoLRUServer, DbName, Options), - IsSuccess = case Res of - {ok, _} -> true; - _ -> false - end, + IsSuccess = + case Res of + {ok, _} -> true; + _ -> false + end, case IsSuccess andalso lists:member(create, Options) of true -> couch_event:notify(DbName, created); @@ -449,10 +486,11 @@ open_async(Server, From, DbName, Options) -> couch_log:info("open_result error ~p for ~s", [Res, DbName]) end end), - ReqType = case lists:member(create, Options) of - true -> create; - false -> open - end, + ReqType = + case lists:member(create, Options) of + true -> create; + false -> open + end, true = ets:insert(couch_dbs(Server), #entry{ name = DbName, pid = Opener, @@ -478,7 +516,7 @@ open_async_int(Server, DbName, Options) -> Error1 end. 
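For intuition, here is what the scan regex assembled above accepts, assuming the stock `couch` extension is the only one configured:

```erlang
%% "mydb.couch"             plain database file       -> match
%% "mydb.1415960794.couch"  optional shard timestamp  -> match
%% "MyDb.couch"             uppercase outside class   -> nomatch
{match, _} = re:run("mydb.1415960794.couch",
    "^[a-z0-9\\_\\$()\\+\\-]*(\\.[0-9]{10,})?\\.(couch)$").
```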
-handle_call(close_lru, _From, #server{lru=Lru} = Server) -> +handle_call(close_lru, _From, #server{lru = Lru} = Server) -> case couch_lru:close(Lru) of {true, NewLru} -> {reply, ok, db_closed(Server#server{lru = NewLru}, [])}; @@ -488,9 +526,9 @@ handle_call(close_lru, _From, #server{lru=Lru} = Server) -> handle_call(open_dbs_count, _From, Server) -> {reply, Server#server.dbs_open, Server}; handle_call({set_update_lru_on_read, UpdateOnRead}, _From, Server) -> - {reply, ok, Server#server{update_lru_on_read=UpdateOnRead}}; + {reply, ok, Server#server{update_lru_on_read = UpdateOnRead}}; handle_call({set_max_dbs_open, Max}, _From, Server) -> - {reply, ok, Server#server{max_dbs_open=Max}}; + {reply, ok, Server#server{max_dbs_open = Max}}; handle_call(reload_engines, _From, Server) -> {reply, ok, Server#server{engines = get_configured_engines()}}; handle_call(get_server, _From, Server) -> @@ -522,12 +560,13 @@ handle_call({open_result, DbName, {ok, Db}}, {Opener, _}, Server) -> start_time = couch_db:get_instance_start_time(Db) }), true = ets:insert(couch_dbs_pid_to_name(Server), {DbPid, DbName}), - Lru = case couch_db:is_system_db(Db) of - false -> - couch_lru:insert(DbName, Server#server.lru); - true -> - Server#server.lru - end, + Lru = + case couch_db:is_system_db(Db) of + false -> + couch_lru:insert(DbName, Server#server.lru); + true -> + Server#server.lru + end, {reply, ok, Server#server{lru = Lru}}; [#entry{}] -> % A mismatched opener pid means that this open_result message @@ -547,12 +586,13 @@ handle_call({open_result, DbName, Error}, {Opener, _}, Server) -> [gen_server:reply(Waiter, Error) || Waiter <- Waiters], true = ets:delete(couch_dbs(Server), DbName), true = ets:delete(couch_dbs_pid_to_name(Server), Opener), - NewServer = case ReqType of - {create, DbName, Options, CrFrom} -> - open_async(Server, CrFrom, DbName, Options); - _ -> - Server - end, + NewServer = + case ReqType of + {create, DbName, Options, CrFrom} -> + open_async(Server, CrFrom, DbName, Options); + _ -> + Server + end, {reply, ok, db_closed(NewServer, Entry#entry.db_options)}; [#entry{}] -> % A mismatched pid means that this open_result message @@ -561,159 +601,174 @@ handle_call({open_result, DbName, Error}, {Opener, _}, Server) -> end; handle_call({open, DbName, Options}, From, Server) -> case ets:lookup(couch_dbs(Server), DbName) of - [] -> - case make_room(Server, Options) of - {ok, Server2} -> - {noreply, open_async(Server2, From, DbName, Options)}; - CloseError -> - {reply, CloseError, Server} - end; - [#entry{waiters = Waiters} = Entry] when is_list(Waiters) -> - true = ets:insert(couch_dbs(Server), Entry#entry{waiters = [From | Waiters]}), - NumWaiters = length(Waiters), - if NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 -> ok; true -> - Fmt = "~b clients waiting to open db ~s", - couch_log:info(Fmt, [length(Waiters), DbName]) - end, - {noreply, Server}; - [#entry{db = Db}] -> - {reply, {ok, Db}, Server} + [] -> + case make_room(Server, Options) of + {ok, Server2} -> + {noreply, open_async(Server2, From, DbName, Options)}; + CloseError -> + {reply, CloseError, Server} + end; + [#entry{waiters = Waiters} = Entry] when is_list(Waiters) -> + true = ets:insert(couch_dbs(Server), Entry#entry{waiters = [From | Waiters]}), + NumWaiters = length(Waiters), + if + NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 -> + ok; + true -> + Fmt = "~b clients waiting to open db ~s", + couch_log:info(Fmt, [length(Waiters), DbName]) + end, + {noreply, Server}; + [#entry{db = Db}] -> + {reply, {ok, Db}, Server} end; 
handle_call({create, DbName, Options}, From, Server) -> case ets:lookup(couch_dbs(Server), DbName) of - [] -> - case make_room(Server, Options) of - {ok, Server2} -> + [] -> + case make_room(Server, Options) of + {ok, Server2} -> + CrOptions = [create | Options], + {noreply, open_async(Server2, From, DbName, CrOptions)}; + CloseError -> + {reply, CloseError, Server} + end; + [#entry{req_type = open} = Entry] -> + % We're trying to create a database while someone is in + % the middle of trying to open it. We allow one creator + % to wait while we figure out if it'll succeed. CrOptions = [create | Options], - {noreply, open_async(Server2, From, DbName, CrOptions)}; - CloseError -> - {reply, CloseError, Server} - end; - [#entry{req_type = open} = Entry] -> - % We're trying to create a database while someone is in - % the middle of trying to open it. We allow one creator - % to wait while we figure out if it'll succeed. - CrOptions = [create | Options], - Req = {create, DbName, CrOptions, From}, - true = ets:insert(couch_dbs(Server), Entry#entry{req_type = Req}), - {noreply, Server}; - [_AlreadyRunningDb] -> - {reply, file_exists, Server} + Req = {create, DbName, CrOptions, From}, + true = ets:insert(couch_dbs(Server), Entry#entry{req_type = Req}), + {noreply, Server}; + [_AlreadyRunningDb] -> + {reply, file_exists, Server} end; handle_call({delete, DbName, Options}, _From, Server) -> DbNameList = binary_to_list(DbName), case check_dbname(DbNameList) of - ok -> - Server2 = - case ets:lookup(couch_dbs(Server), DbName) of - [] -> Server; - [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) -> - true = ets:delete(couch_dbs(Server), DbName), - true = ets:delete(couch_dbs_pid_to_name(Server), Pid), - exit(Pid, kill), - [gen_server:reply(Waiter, not_found) || Waiter <- Waiters], - db_closed(Server, Entry#entry.db_options); - [#entry{pid = Pid} = Entry] -> - true = ets:delete(couch_dbs(Server), DbName), - true = ets:delete(couch_dbs_pid_to_name(Server), Pid), - exit(Pid, kill), - db_closed(Server, Entry#entry.db_options) - end, + ok -> + Server2 = + case ets:lookup(couch_dbs(Server), DbName) of + [] -> + Server; + [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) -> + true = ets:delete(couch_dbs(Server), DbName), + true = ets:delete(couch_dbs_pid_to_name(Server), Pid), + exit(Pid, kill), + [gen_server:reply(Waiter, not_found) || Waiter <- Waiters], + db_closed(Server, Entry#entry.db_options); + [#entry{pid = Pid} = Entry] -> + true = ets:delete(couch_dbs(Server), DbName), + true = ets:delete(couch_dbs_pid_to_name(Server), Pid), + exit(Pid, kill), + db_closed(Server, Entry#entry.db_options) + end, - couch_db_plugin:on_delete(DbName, Options), + couch_db_plugin:on_delete(DbName, Options), - DelOpt = [{context, delete} | Options], + DelOpt = [{context, delete} | Options], - % Make sure and remove all compaction data - delete_compaction_files(DbNameList, Options), + % Make sure and remove all compaction data + delete_compaction_files(DbNameList, Options), - {ok, {Engine, FilePath}} = get_engine(Server, DbNameList), - RootDir = Server#server.root_dir, - case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of - ok -> - couch_event:notify(DbName, deleted), - {reply, ok, Server2}; - {error, enoent} -> - {reply, not_found, Server2}; - Else -> - {reply, Else, Server2} - end; - Error -> - {reply, Error, Server} + {ok, {Engine, FilePath}} = get_engine(Server, DbNameList), + RootDir = Server#server.root_dir, + case couch_db_engine:delete(Engine, RootDir, FilePath, 
DelOpt) of + ok -> + couch_event:notify(DbName, deleted), + {reply, ok, Server2}; + {error, enoent} -> + {reply, not_found, Server2}; + Else -> + {reply, Else, Server2} + end; + Error -> + {reply, Error, Server} end; handle_call({db_updated, Db}, _From, Server0) -> DbName = couch_db:name(Db), StartTime = couch_db:get_instance_start_time(Db), - Server = try ets:lookup_element(couch_dbs(Server0), DbName, #entry.start_time) of - StartTime -> - true = ets:update_element(couch_dbs(Server0), DbName, {#entry.db, Db}), - Lru = case couch_db:is_system_db(Db) of - false -> couch_lru:update(DbName, Server0#server.lru); - true -> Server0#server.lru - end, - Server0#server{lru = Lru}; - _ -> - Server0 - catch _:_ -> - Server0 - end, + Server = + try ets:lookup_element(couch_dbs(Server0), DbName, #entry.start_time) of + StartTime -> + true = ets:update_element(couch_dbs(Server0), DbName, {#entry.db, Db}), + Lru = + case couch_db:is_system_db(Db) of + false -> couch_lru:update(DbName, Server0#server.lru); + true -> Server0#server.lru + end, + Server0#server{lru = Lru}; + _ -> + Server0 + catch + _:_ -> + Server0 + end, {reply, ok, Server}. -handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read=true} = Server) -> +handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read = true} = Server) -> {noreply, Server#server{lru = couch_lru:update(DbName, Lru)}}; handle_cast({update_lru, _DbName}, Server) -> {noreply, Server}; handle_cast({close_db_if_idle, DbName}, Server) -> case ets:update_element(couch_dbs(Server), DbName, {#entry.lock, locked}) of - true -> - [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs(Server), DbName), - case couch_db:is_idle(Db) of true -> - DbPid = couch_db:get_pid(Db), - true = ets:delete(couch_dbs(Server), DbName), - true = ets:delete(couch_dbs_pid_to_name(Server), DbPid), - exit(DbPid, kill), - {noreply, db_closed(Server, DbOpts)}; + [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs(Server), DbName), + case couch_db:is_idle(Db) of + true -> + DbPid = couch_db:get_pid(Db), + true = ets:delete(couch_dbs(Server), DbName), + true = ets:delete(couch_dbs_pid_to_name(Server), DbPid), + exit(DbPid, kill), + {noreply, db_closed(Server, DbOpts)}; + false -> + true = ets:update_element( + couch_dbs(Server), DbName, {#entry.lock, unlocked} + ), + {noreply, Server} + end; false -> - true = ets:update_element( - couch_dbs(Server), DbName, {#entry.lock, unlocked}), {noreply, Server} - end; - false -> - {noreply, Server} end; - handle_cast(Msg, Server) -> {stop, {unknown_cast_message, Msg}, Server}. -code_change(_OldVsn, #server{}=State, _Extra) -> +code_change(_OldVsn, #server{} = State, _Extra) -> {ok, State}. handle_info({'EXIT', _Pid, config_change}, Server) -> {stop, config_change, Server}; handle_info({'EXIT', Pid, Reason}, Server) -> case ets:lookup(couch_dbs_pid_to_name(Server), Pid) of - [{Pid, DbName}] -> - [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs(Server), DbName), - if Reason /= snappy_nif_not_loaded -> ok; true -> - Msg = io_lib:format("To open the database `~s`, Apache CouchDB " - "must be built with Erlang OTP R13B04 or higher.", [DbName]), - couch_log:error(Msg, []) - end, - % We kill databases on purpose so there's no reason - % to log that fact. So we restrict logging to "interesting" - % reasons. 
- if Reason == normal orelse Reason == killed -> ok; true -> - couch_log:info("db ~s died with reason ~p", [DbName, Reason]) - end, - if not is_list(Waiters) -> ok; true -> - [gen_server:reply(Waiter, Reason) || Waiter <- Waiters] - end, - true = ets:delete(couch_dbs(Server), DbName), - true = ets:delete(couch_dbs_pid_to_name(Server), Pid), - {noreply, db_closed(Server, Entry#entry.db_options)}; - [] -> - {noreply, Server} + [{Pid, DbName}] -> + [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs(Server), DbName), + if + Reason /= snappy_nif_not_loaded -> + ok; + true -> + Msg = io_lib:format( + "To open the database `~s`, Apache CouchDB " + "must be built with Erlang OTP R13B04 or higher.", + [DbName] + ), + couch_log:error(Msg, []) + end, + % We kill databases on purpose so there's no reason + % to log that fact. So we restrict logging to "interesting" + % reasons. + if + Reason == normal orelse Reason == killed -> ok; + true -> couch_log:info("db ~s died with reason ~p", [DbName, Reason]) + end, + if + not is_list(Waiters) -> ok; + true -> [gen_server:reply(Waiter, Reason) || Waiter <- Waiters] + end, + true = ets:delete(couch_dbs(Server), DbName), + true = ets:delete(couch_dbs_pid_to_name(Server), Pid), + {noreply, db_closed(Server, Entry#entry.db_options)}; + [] -> + {noreply, Server} end; handle_info({restart_config_listener, N}, State) -> ok = config:listen_for_changes(?MODULE, N), @@ -723,13 +778,13 @@ handle_info(Info, Server) -> db_opened(Server, Options) -> case lists:member(sys_db, Options) of - false -> Server#server{dbs_open=Server#server.dbs_open + 1}; + false -> Server#server{dbs_open = Server#server.dbs_open + 1}; true -> Server end. db_closed(Server, Options) -> case lists:member(sys_db, Options) of - false -> Server#server{dbs_open=Server#server.dbs_open - 1}; + false -> Server#server{dbs_open = Server#server.dbs_open - 1}; true -> Server end. @@ -757,13 +812,17 @@ validate_open_or_create(DbName, Options) -> get_configured_engines() -> ConfigEntries = config:get("couchdb_engines"), - Engines = lists:flatmap(fun({Extension, ModuleStr}) -> - try - [{Extension, list_to_atom(ModuleStr)}] - catch _T:_R -> - [] - end - end, ConfigEntries), + Engines = lists:flatmap( + fun({Extension, ModuleStr}) -> + try + [{Extension, list_to_atom(ModuleStr)}] + catch + _T:_R -> + [] + end + end, + ConfigEntries + ), case Engines of [] -> [{"couch", couch_bt_engine}]; @@ -771,7 +830,6 @@ get_configured_engines() -> Else end. - get_engine(Server, DbName, Options) -> #server{ root_dir = RootDir, @@ -791,7 +849,6 @@ get_engine(Server, DbName, Options) -> get_engine(Server, DbName) end. - get_engine(Server, DbName) -> #server{ root_dir = RootDir, @@ -807,18 +864,20 @@ get_engine(Server, DbName) -> erlang:error(engine_conflict) end. - get_possible_engines(DbName, RootDir, Engines) -> - lists:foldl(fun({Extension, Engine}, Acc) -> - Path = make_filepath(RootDir, DbName, Extension), - case couch_db_engine:exists(Engine, Path) of - true -> - [{Engine, Path} | Acc]; - false -> - Acc - end - end, [], Engines). - + lists:foldl( + fun({Extension, Engine}, Acc) -> + Path = make_filepath(RootDir, DbName, Extension), + case couch_db_engine:exists(Engine, Path) of + true -> + [{Engine, Path} | Acc]; + false -> + Acc + end + end, + [], + Engines + ). 
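The engine registry parsed by get_configured_engines/0 above lives in the `couchdb_engines` config section as extension/module pairs; with no entries it falls back to a built-in default equivalent to:

```ini
[couchdb_engines]
couch = couch_bt_engine
```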
get_default_engine(Server, DbName) -> #server{ @@ -831,15 +890,15 @@ get_default_engine(Server, DbName) -> {Extension, Module} -> {ok, {Module, make_filepath(RootDir, DbName, Extension)}}; false -> - Fmt = "Invalid storage engine extension ~s," - " configured engine extensions are: ~s", + Fmt = + "Invalid storage engine extension ~s," + " configured engine extensions are: ~s", Exts = [E || {E, _} <- Engines], Args = [Extension, string:join(Exts, ", ")], couch_log:error(Fmt, Args), {ok, Default} end. - make_filepath(RootDir, DbName, Extension) when is_binary(RootDir) -> make_filepath(binary_to_list(RootDir), DbName, Extension); make_filepath(RootDir, DbName, Extension) when is_binary(DbName) -> @@ -849,7 +908,6 @@ make_filepath(RootDir, DbName, Extension) when is_binary(Extension) -> make_filepath(RootDir, DbName, Extension) -> filename:join([RootDir, "./" ++ DbName ++ "." ++ Extension]). - get_engine_extensions() -> case config:get("couchdb_engines") of [] -> @@ -858,7 +916,6 @@ get_engine_extensions() -> [Ext || {Ext, _Mod} <- Entries] end. - check_engine(Options) -> case couch_util:get_value(engine, Options) of Ext when is_binary(Ext) -> @@ -874,7 +931,6 @@ check_engine(Options) -> ok end. - get_engine_path(DbName, Engine) when is_binary(DbName), is_atom(Engine) -> RootDir = config:get("couchdb", "database_dir", "."), case lists:keyfind(Engine, 2, get_configured_engines()) of @@ -897,60 +953,48 @@ unlock(DbName) when is_binary(DbName) -> true = ets:delete(couch_dbs_locks(DbName), DbName), ok. - db_updated(Db) -> DbName = couch_db:name(Db), gen_server:call(couch_server(DbName), {db_updated, Db}, infinity). - couch_server(Arg) -> name("couch_server", Arg). - couch_dbs(Arg) -> name("couch_dbs", Arg). - couch_dbs_pid_to_name(Arg) -> name("couch_dbs_pid_to_name", Arg). - couch_dbs_locks(Arg) -> name("couch_dbs_locks", Arg). - name("couch_dbs", #server{} = Server) -> Server#server.couch_dbs; - name("couch_dbs_pid_to_name", #server{} = Server) -> Server#server.couch_dbs_pid_to_name; - name("couch_dbs_locks", #server{} = Server) -> Server#server.couch_dbs_locks; - name(BaseName, DbName) when is_list(DbName) -> name(BaseName, ?l2b(DbName)); - name(BaseName, DbName) when is_binary(DbName) -> N = 1 + erlang:phash2(DbName, num_servers()), name(BaseName, N); - name(BaseName, N) when is_integer(N), N > 0 -> list_to_atom(BaseName ++ "_" ++ integer_to_list(N)). - num_servers() -> erlang:system_info(schedulers). - aggregate_queue_len() -> N = num_servers(), Names = [couch_server(I) || I <- lists:seq(1, N)], - MQs = [process_info(whereis(Name), message_queue_len) || - Name <- Names], + MQs = [ + process_info(whereis(Name), message_queue_len) + || Name <- Names + ], lists:sum([X || {_, X} <- MQs]). - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -1006,10 +1050,13 @@ should_add_sys_db_callbacks() -> "_replicator.couch", "_replicator" ], - lists:foreach(fun(DbName) -> - check_case(DbName, true), - check_case(?l2b(DbName), true) - end, Cases). + lists:foreach( + fun(DbName) -> + check_case(DbName, true), + check_case(?l2b(DbName), true) + end, + Cases + ). should_not_add_sys_db_callbacks() -> Cases = [ @@ -1021,10 +1068,13 @@ should_not_add_sys_db_callbacks() -> "mydb.couch", "mydb" ], - lists:foreach(fun(DbName) -> - check_case(DbName, false), - check_case(?l2b(DbName), false) - end, Cases). + lists:foreach( + fun(DbName) -> + check_case(DbName, false), + check_case(?l2b(DbName), false) + end, + Cases + ). 
check_case(DbName, IsAdded) -> Options = maybe_add_sys_db_callbacks(DbName, [other_options]), diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl index 2ab46d7e7..12b290820 100644 --- a/src/couch/src/couch_stream.erl +++ b/src/couch/src/couch_stream.erl @@ -14,7 +14,6 @@ -behaviour(gen_server). -vsn(1). - -export([ open/1, open/2, @@ -39,16 +38,14 @@ code_change/3 ]). - -include_lib("couch/include/couch_db.hrl"). -define(DEFAULT_BUFFER_SIZE, 4096). - -record(stream, { engine, opener_monitor, - written_pointers=[], + written_pointers = [], buffer_list = [], buffer_len = 0, max_buffer, @@ -62,39 +59,35 @@ end_encoding_fun }). - open({_StreamEngine, _StreamEngineState} = Engine) -> open(Engine, []). - open({_StreamEngine, _StreamEngineState} = Engine, Options) -> gen_server:start_link(?MODULE, {Engine, self(), erlang:get(io_priority), Options}, []). - close(Pid) -> gen_server:call(Pid, close, infinity). - copy(Src, Dst) -> - foldl(Src, fun(Bin, _) -> - ok = write(Dst, Bin) - end, ok). - + foldl( + Src, + fun(Bin, _) -> + ok = write(Dst, Bin) + end, + ok + ). write(_Pid, <<>>) -> ok; write(Pid, Bin) -> gen_server:call(Pid, {write, Bin}, infinity). - to_disk_term({Engine, EngineState}) -> Engine:to_disk_term(EngineState). - foldl({Engine, EngineState}, Fun, Acc) -> Engine:foldl(EngineState, Fun, Acc). - foldl(Engine, <<>>, Fun, Acc) -> foldl(Engine, Fun, Acc); foldl(Engine, Md5, UserFun, UserAcc) -> @@ -103,18 +96,17 @@ foldl(Engine, Md5, UserFun, UserAcc) -> Md5 = couch_hash:md5_hash_final(Md5Acc), OutAcc. - foldl_decode(Engine, Md5, Enc, UserFun, UserAcc1) -> - {DecDataFun, DecEndFun} = case Enc of - gzip -> ungzip_init(); - identity -> identity_enc_dec_funs() - end, + {DecDataFun, DecEndFun} = + case Enc of + gzip -> ungzip_init(); + identity -> identity_enc_dec_funs() + end, InitAcc = {DecDataFun, UserFun, UserAcc1}, {_, _, UserAcc2} = foldl(Engine, Md5, fun foldl_decode/2, InitAcc), DecEndFun(), UserAcc2. - range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From -> NewEngine = do_seek(Engine, From), InitAcc = {To - From, UserFun, UserAcc}, @@ -126,19 +118,16 @@ range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From -> UserAcc3 end. - foldl_md5(Bin, {Md5Acc, UserFun, UserAcc}) -> NewMd5Acc = couch_hash:md5_hash_update(Md5Acc, Bin), {NewMd5Acc, UserFun, UserFun(Bin, UserAcc)}. - foldl_decode(EncBin, {DecFun, UserFun, UserAcc}) -> case DecFun(EncBin) of <<>> -> {DecFun, UserFun, UserAcc}; Dec -> {DecFun, UserFun, UserFun(Dec, UserAcc)} end. 
- foldl_length(Bin, {Length, UserFun, UserAcc}) -> BinSize = size(Bin), case BinSize =< Length of @@ -151,24 +140,24 @@ foldl_length(Bin, {Length, UserFun, UserAcc}) -> gzip_init(Options) -> case couch_util:get_value(compression_level, Options, 0) of - Lvl when Lvl >= 1 andalso Lvl =< 9 -> - Z = zlib:open(), - % 15 = ?MAX_WBITS (defined in the zlib module) - % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1 - ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default), - { - fun(Data) -> - zlib:deflate(Z, Data) - end, - fun() -> - Last = zlib:deflate(Z, [], finish), - ok = zlib:deflateEnd(Z), - ok = zlib:close(Z), - Last - end - }; - _ -> - identity_enc_dec_funs() + Lvl when Lvl >= 1 andalso Lvl =< 9 -> + Z = zlib:open(), + % 15 = ?MAX_WBITS (defined in the zlib module) + % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1 + ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default), + { + fun(Data) -> + zlib:deflate(Z, Data) + end, + fun() -> + Last = zlib:deflate(Z, [], finish), + ok = zlib:deflateEnd(Z), + ok = zlib:close(Z), + Last + end + }; + _ -> + identity_enc_dec_funs() end. ungzip_init() -> @@ -190,25 +179,24 @@ identity_enc_dec_funs() -> fun() -> [] end }. - init({Engine, OpenerPid, OpenerPriority, Options}) -> erlang:put(io_priority, OpenerPriority), {EncodingFun, EndEncodingFun} = - case couch_util:get_value(encoding, Options, identity) of - identity -> identity_enc_dec_funs(); - gzip -> gzip_init(Options) - end, + case couch_util:get_value(encoding, Options, identity) of + identity -> identity_enc_dec_funs(); + gzip -> gzip_init(Options) + end, {ok, #stream{ - engine=Engine, - opener_monitor=erlang:monitor(process, OpenerPid), - md5=couch_hash:md5_hash_init(), - identity_md5=couch_hash:md5_hash_init(), - encoding_fun=EncodingFun, - end_encoding_fun=EndEncodingFun, - max_buffer=couch_util:get_value( - buffer_size, Options, ?DEFAULT_BUFFER_SIZE) - } - }. + engine = Engine, + opener_monitor = erlang:monitor(process, OpenerPid), + md5 = couch_hash:md5_hash_init(), + identity_md5 = couch_hash:md5_hash_init(), + encoding_fun = EncodingFun, + end_encoding_fun = EndEncodingFun, + max_buffer = couch_util:get_value( + buffer_size, Options, ?DEFAULT_BUFFER_SIZE + ) + }}. terminate(_Reason, _Stream) -> ok. 
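Because the `16 + ?MAX_WBITS` constant in `gzip_init/1` above is easy to get wrong, here is a self-contained round trip (a sketch, not CouchDB code) showing that deflating with window bits `16 + 15` yields gzip-framed output that plain `zlib:gunzip/1` accepts:

```
Z = zlib:open(),
%% 15 = ?MAX_WBITS; adding 16 selects the gzip wrapper, matching the
%% deflateInit/6 call in gzip_init/1 above.
ok = zlib:deflateInit(Z, 6, deflated, 16 + 15, 8, default),
Compressed = iolist_to_binary([
    zlib:deflate(Z, <<"hello, couch">>),
    zlib:deflate(Z, [], finish)
]),
ok = zlib:deflateEnd(Z),
ok = zlib:close(Z),
<<"hello, couch">> = zlib:gunzip(Compressed).
```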
@@ -224,36 +212,42 @@ handle_call({write, Bin}, _From, Stream) -> md5 = Md5, identity_md5 = IdenMd5, identity_len = IdenLen, - encoding_fun = EncodingFun} = Stream, - if BinSize + BufferLen > Max -> - WriteBin = lists:reverse(Buffer, [Bin]), - IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin), - case EncodingFun(WriteBin) of - [] -> - % case where the encoder did some internal buffering - % (zlib does it for example) - NewEngine = Engine, - WrittenLen2 = WrittenLen, - Md5_2 = Md5; - WriteBin2 -> - NewEngine = do_write(Engine, WriteBin2), - WrittenLen2 = WrittenLen + iolist_size(WriteBin2), - Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2) - end, + encoding_fun = EncodingFun + } = Stream, + if + BinSize + BufferLen > Max -> + WriteBin = lists:reverse(Buffer, [Bin]), + IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin), + case EncodingFun(WriteBin) of + [] -> + % case where the encoder did some internal buffering + % (zlib does it for example) + NewEngine = Engine, + WrittenLen2 = WrittenLen, + Md5_2 = Md5; + WriteBin2 -> + NewEngine = do_write(Engine, WriteBin2), + WrittenLen2 = WrittenLen + iolist_size(WriteBin2), + Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2) + end, - {reply, ok, Stream#stream{ - engine = NewEngine, - written_len=WrittenLen2, - buffer_list=[], - buffer_len=0, - md5=Md5_2, - identity_md5=IdenMd5_2, - identity_len=IdenLen + BinSize}, hibernate}; - true -> - {reply, ok, Stream#stream{ - buffer_list=[Bin|Buffer], - buffer_len=BufferLen + BinSize, - identity_len=IdenLen + BinSize}} + {reply, ok, + Stream#stream{ + engine = NewEngine, + written_len = WrittenLen2, + buffer_list = [], + buffer_len = 0, + md5 = Md5_2, + identity_md5 = IdenMd5_2, + identity_len = IdenLen + BinSize + }, + hibernate}; + true -> + {reply, ok, Stream#stream{ + buffer_list = [Bin | Buffer], + buffer_len = BufferLen + BinSize, + identity_len = IdenLen + BinSize + }} end; handle_call(close, _From, Stream) -> #stream{ @@ -265,35 +259,36 @@ handle_call(close, _From, Stream) -> identity_md5 = IdenMd5, identity_len = IdenLen, encoding_fun = EncodingFun, - end_encoding_fun = EndEncodingFun} = Stream, + end_encoding_fun = EndEncodingFun + } = Stream, WriteBin = lists:reverse(Buffer), IdenMd5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(IdenMd5, WriteBin)), WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(), Md5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(Md5, WriteBin2)), - Result = case WriteBin2 of - [] -> - {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final}; - _ -> - NewEngine = do_write(Engine, WriteBin2), - StreamLen = WrittenLen + iolist_size(WriteBin2), - {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final} - end, + Result = + case WriteBin2 of + [] -> + {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final}; + _ -> + NewEngine = do_write(Engine, WriteBin2), + StreamLen = WrittenLen + iolist_size(WriteBin2), + {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final} + end, erlang:demonitor(MonRef), {stop, normal, Result, Stream}. handle_cast(_Msg, State) -> - {noreply,State}. + {noreply, State}. code_change(_OldVsn, State, _Extra) -> {ok, State}. -handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor=Ref} = State) -> +handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor = Ref} = State) -> {stop, normal, State}; handle_info(_Info, State) -> {noreply, State}. - do_seek({Engine, EngineState}, Offset) -> {ok, NewState} = Engine:seek(EngineState, Offset), {Engine, NewState}. 
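The write path reformatted above buffers binaries until `buffer_len` plus the incoming size would exceed `max_buffer` (4096 bytes by default), and only then encodes and writes one batch. A stripped-down sketch of that rule, using a plain `{Buffer, Len, Max}` triple in place of the real `#stream{}` state:

```
%% Hypothetical helper, illustrative only: returns {flush, IoData, NewState}
%% when a write would overflow the buffer, {ok, NewState} otherwise.
%% Note lists:reverse/2, as in handle_call({write, ...}) above, both
%% reverses the accumulated buffer and appends the new binary.
buffer_write(Bin, {Buffer, Len, Max}) when byte_size(Bin) + Len > Max ->
    {flush, lists:reverse(Buffer, [Bin]), {[], 0, Max}};
buffer_write(Bin, {Buffer, Len, Max}) ->
    {ok, {[Bin | Buffer], Len + byte_size(Bin), Max}}.
```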
@@ -305,4 +300,3 @@ do_write({Engine, EngineState}, Data) -> do_finalize({Engine, EngineState}) -> {ok, NewState} = Engine:finalize(EngineState), {Engine, NewState}. - diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl index b936c1e5d..033f7115f 100644 --- a/src/couch/src/couch_sup.erl +++ b/src/couch/src/couch_sup.erl @@ -15,7 +15,6 @@ -vsn(1). -behaviour(config_listener). - -export([ start_link/0, init/1, @@ -23,10 +22,8 @@ handle_config_terminate/3 ]). - -include_lib("couch/include/couch_db.hrl"). - start_link() -> assert_admins(), maybe_launch_admin_annoyance_reporter(), @@ -42,43 +39,45 @@ start_link() -> Else end. - init(_Args) -> couch_log:info("Starting ~s", [?MODULE]), - {ok, {{one_for_one,10, 60}, [ - { - config_listener_mon, - {config_listener_mon, start_link, [?MODULE, nil]}, - permanent, - 5000, - worker, - [config_listener_mon] - }, - { - couch_primary_services, - {couch_primary_sup, start_link, []}, - permanent, - infinity, - supervisor, - [couch_primary_sup] - }, - { - couch_secondary_services, - {couch_secondary_sup, start_link, []}, - permanent, - infinity, - supervisor, - [couch_secondary_sup] - } - ]}}. - + {ok, + {{one_for_one, 10, 60}, [ + { + config_listener_mon, + {config_listener_mon, start_link, [?MODULE, nil]}, + permanent, + 5000, + worker, + [config_listener_mon] + }, + { + couch_primary_services, + {couch_primary_sup, start_link, []}, + permanent, + infinity, + supervisor, + [couch_primary_sup] + }, + { + couch_secondary_services, + {couch_secondary_sup, start_link, []}, + permanent, + infinity, + supervisor, + [couch_secondary_sup] + } + ]}}. handle_config_change("daemons", _, _, _, _) -> exit(whereis(?MODULE), shutdown), remove_handler; handle_config_change("couchdb", "util_driver_dir", _, _, _) -> - [Pid] = [P || {collation_driver, P, _, _} - <- supervisor:which_children(couch_primary_services)], + [Pid] = [ + P + || {collation_driver, P, _, _} <- + supervisor:which_children(couch_primary_services) + ], Pid ! reload_driver, {ok, nil}; handle_config_change(_, _, _, _, _) -> @@ -91,44 +90,47 @@ assert_admins() -> couch_log:info("Preflight check: Asserting Admin Account~n", []), case {config:get("admins"), os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE")} of {[], false} -> - couch_log:info("~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n" - ++ " No Admin Account Found, aborting startup. ~n" - ++ " Please configure an admin account in your local.ini file. ~n" - ++ "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n", []), + couch_log:info( + "~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n" ++ + " No Admin Account Found, aborting startup. ~n" ++ + " Please configure an admin account in your local.ini file. ~n" ++ + "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n", + [] + ), % Wait a second so the log message can make it to the log timer:sleep(500), erlang:halt(1); - _ -> ok + _ -> + ok end. send_no_admin_account_error_message() -> - couch_log:error("No Admin Account configured." - ++ " Please configure an Admin Account in your local.ini file and restart CouchDB.~n", []), + couch_log:error( + "No Admin Account configured." ++ + " Please configure an Admin Account in your local.ini file and restart CouchDB.~n", + [] + ), FiveMinutes = 5 * 1000 * 60, timer:sleep(FiveMinutes), send_no_admin_account_error_message(). 
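As a practical footnote to `assert_admins/0` above: the preflight passes as soon as the `admins` config section is non-empty, which normally means an `[admins]` entry in `local.ini`. The Erlang equivalent would be something like the following (illustrative; the password is made up, and CouchDB stores a hashed form rather than the plaintext):

```
%% Persist an admin entry so the startup check above succeeds.
config:set("admins", "admin", "s3cret", true).
```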
- + maybe_launch_admin_annoyance_reporter() -> case os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE") of false -> ok; _ -> spawn_link(fun send_no_admin_account_error_message/0) end. - notify_starting() -> couch_log:info("Apache CouchDB ~s is starting.~n", [ couch_server:get_version() ]). - notify_started() -> couch_log:info("Apache CouchDB has started. Time to relax.~n", []). - notify_error(Error) -> couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]). - write_pidfile() -> case init:get_argument(pidfile) of {ok, [PidFile]} -> diff --git a/src/couch/src/couch_task_status.erl b/src/couch/src/couch_task_status.erl index 74247d63d..42d7c4f62 100644 --- a/src/couch/src/couch_task_status.erl +++ b/src/couch/src/couch_task_status.erl @@ -36,36 +36,30 @@ -define(set(L, K, V), lists:keystore(K, 1, L, {K, V})). - start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). - stop() -> gen_server:cast(?MODULE, stop). - all() -> gen_server:call(?MODULE, all). - add_task(Props) -> put(task_status_update, {{0, 0, 0}, 0}), Ts = timestamp(), TaskProps = lists:ukeysort( - 1, [{started_on, Ts}, {updated_on, Ts} | Props]), + 1, [{started_on, Ts}, {updated_on, Ts} | Props] + ), put(task_status_props, TaskProps), gen_server:call(?MODULE, {add_task, TaskProps}). - is_task_added() -> is_list(erlang:get(task_status_props)). - set_update_frequency(Msecs) -> put(task_status_update, {{0, 0, 0}, Msecs * 1000}). - update(Props) -> MergeProps = lists:ukeysort(1, Props), CurrProps = erlang:get(task_status_props), @@ -77,7 +71,6 @@ update(Props) -> persist(TaskProps) end. - get(Props) when is_list(Props) -> TaskProps = erlang:get(task_status_props), [couch_util:get_value(P, TaskProps) || P <- Props]; @@ -85,61 +78,54 @@ get(Prop) -> TaskProps = erlang:get(task_status_props), couch_util:get_value(Prop, TaskProps). - maybe_persist(TaskProps) -> {LastUpdateTime, Frequency} = erlang:get(task_status_update), case timer:now_diff(Now = os:timestamp(), LastUpdateTime) >= Frequency of - true -> - put(task_status_update, {Now, Frequency}), - persist(TaskProps); - false -> - ok + true -> + put(task_status_update, {Now, Frequency}), + persist(TaskProps); + false -> + ok end. - persist(TaskProps0) -> TaskProps = ?set(TaskProps0, updated_on, timestamp(os:timestamp())), put(task_status_props, TaskProps), gen_server:cast(?MODULE, {update_status, self(), TaskProps}). - init([]) -> % read configuration settings and register for configuration changes ets:new(?MODULE, [ordered_set, protected, named_table]), {ok, nil}. - -terminate(_Reason,_State) -> +terminate(_Reason, _State) -> ok. - handle_call({add_task, TaskProps}, {From, _}, Server) -> case ets:lookup(?MODULE, From) of - [] -> - true = ets:insert(?MODULE, {From, TaskProps}), - erlang:monitor(process, From), - {reply, ok, Server}; - [_] -> - {reply, {add_task_error, already_registered}, Server} + [] -> + true = ets:insert(?MODULE, {From, TaskProps}), + erlang:monitor(process, From), + {reply, ok, Server}; + [_] -> + {reply, {add_task_error, already_registered}, Server} end; handle_call(all, _, Server) -> All = [ [{pid, ?l2b(pid_to_list(Pid))}, process_status(Pid) | TaskProps] - || - {Pid, TaskProps} <- ets:tab2list(?MODULE) + || {Pid, TaskProps} <- ets:tab2list(?MODULE) ], {reply, All, Server}. 
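For orientation while reading the couch_task_status hunks: tasks are registered from the worker process itself (entries are keyed by the caller's pid), updates are throttled by `set_update_frequency/1`, and any process can snapshot the table with `all/0`. A hedged usage sketch:

```
%% Run inside the process doing the work (values illustrative):
ok = couch_task_status:add_task([{type, indexer}, {progress, 0}]),
couch_task_status:set_update_frequency(500),  %% persist at most every 500 ms
couch_task_status:update([{progress, 42}]),
%% From any other process:
Tasks = couch_task_status:all().
```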
- handle_cast({update_status, Pid, NewProps}, Server) -> case ets:lookup(?MODULE, Pid) of - [{Pid, _CurProps}] -> - couch_log:debug("New task status for ~p: ~p", [Pid, NewProps]), - true = ets:insert(?MODULE, {Pid, NewProps}); - _ -> - % Task finished/died in the meanwhile and we must have received - % a monitor message before this call - ignore. - ok + [{Pid, _CurProps}] -> + couch_log:debug("New task status for ~p: ~p", [Pid, NewProps]), + true = ets:insert(?MODULE, {Pid, NewProps}); + _ -> + % Task finished/died in the meanwhile and we must have received + % a monitor message before this call - ignore. + ok end, {noreply, Server}; handle_cast(stop, State) -> @@ -150,18 +136,15 @@ handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) -> ets:delete(?MODULE, Pid), {noreply, Server}. - code_change(_OldVsn, State, _Extra) -> {ok, State}. - timestamp() -> timestamp(os:timestamp()). timestamp({Mega, Secs, _}) -> Mega * 1000000 + Secs. - process_status(Pid) -> case process_info(Pid, status) of undefined -> diff --git a/src/couch/src/couch_totp.erl b/src/couch/src/couch_totp.erl index 56e70d81a..3eff9a583 100644 --- a/src/couch/src/couch_totp.erl +++ b/src/couch/src/couch_totp.erl @@ -14,10 +14,11 @@ -export([generate/5]). -generate(Alg, Key, CounterSecs, StepSecs, OutputLen) - when is_atom(Alg), - is_binary(Key), - is_integer(CounterSecs), - is_integer(StepSecs), - is_integer(OutputLen) -> +generate(Alg, Key, CounterSecs, StepSecs, OutputLen) when + is_atom(Alg), + is_binary(Key), + is_integer(CounterSecs), + is_integer(StepSecs), + is_integer(OutputLen) +-> couch_hotp:generate(Alg, Key, CounterSecs div StepSecs, OutputLen). diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl index 0c169d3ed..7ef3aee78 100644 --- a/src/couch/src/couch_users_db.erl +++ b/src/couch/src/couch_users_db.erl @@ -42,15 +42,15 @@ % Else % -> save_doc before_doc_update(Doc, Db, _UpdateType) -> - #user_ctx{name=Name} = couch_db:get_user_ctx(Db), + #user_ctx{name = Name} = couch_db:get_user_ctx(Db), DocName = get_doc_name(Doc), case (catch couch_db:check_is_admin(Db)) of - ok -> - save_doc(Doc); - _ when Name =:= DocName orelse Name =:= null -> - save_doc(Doc); - _ -> - throw(not_found) + ok -> + save_doc(Doc); + _ when Name =:= DocName orelse Name =:= null -> + save_doc(Doc); + _ -> + throw(not_found) end. 
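To make the gate in `before_doc_update/3` above easier to audit: a write is allowed for a server admin, for the authenticated user whose name matches the doc's `org.couchdb.user:Name` id, and for the `null` (server-internal) user context; anything else throws `not_found`, which avoids revealing whether the doc exists. Restated as a standalone truth table (illustrative helper, not part of the module):

```
%% may_write(IsAdmin, CtxName, DocName) -- illustrative only.
may_write(true, _CtxName, _DocName)  -> true;
may_write(false, Name, Name)         -> true;  %% users may edit their own doc
may_write(false, null, _DocName)     -> true;  %% server-internal contexts
may_write(false, _CtxName, _DocName) -> false. %% surfaces as not_found
```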
% If newDoc.password == null || newDoc.password == undefined: @@ -60,38 +60,41 @@ before_doc_update(Doc, Db, _UpdateType) -> % newDoc.password_sha = hash_pw(newDoc.password + salt) % newDoc.salt = salt % newDoc.password = null -save_doc(#doc{body={Body}} = Doc) -> +save_doc(#doc{body = {Body}} = Doc) -> %% Support both schemes to smooth migration from legacy scheme Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"), case {couch_util:get_value(?PASSWORD, Body), Scheme} of - {null, _} -> % server admins don't have a user-db password entry - Doc; - {undefined, _} -> - Doc; - {ClearPassword, "simple"} -> % deprecated - ok = validate_password(ClearPassword), - Salt = couch_uuids:random(), - PasswordSha = couch_passwords:simple(ClearPassword, Salt), - Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE), - Body1 = ?replace(Body0, ?SALT, Salt), - Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha), - Body3 = proplists:delete(?PASSWORD, Body2), - Doc#doc{body={Body3}}; - {ClearPassword, "pbkdf2"} -> - ok = validate_password(ClearPassword), - Iterations = chttpd_util:get_chttpd_auth_config_integer( - "iterations", 10), - Salt = couch_uuids:random(), - DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations), - Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2), - Body1 = ?replace(Body0, ?ITERATIONS, Iterations), - Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey), - Body3 = ?replace(Body2, ?SALT, Salt), - Body4 = proplists:delete(?PASSWORD, Body3), - Doc#doc{body={Body4}}; - {_ClearPassword, Scheme} -> - couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]), - throw({forbidden, ?PASSWORD_SERVER_ERROR}) + % server admins don't have a user-db password entry + {null, _} -> + Doc; + {undefined, _} -> + Doc; + % deprecated + {ClearPassword, "simple"} -> + ok = validate_password(ClearPassword), + Salt = couch_uuids:random(), + PasswordSha = couch_passwords:simple(ClearPassword, Salt), + Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE), + Body1 = ?replace(Body0, ?SALT, Salt), + Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha), + Body3 = proplists:delete(?PASSWORD, Body2), + Doc#doc{body = {Body3}}; + {ClearPassword, "pbkdf2"} -> + ok = validate_password(ClearPassword), + Iterations = chttpd_util:get_chttpd_auth_config_integer( + "iterations", 10 + ), + Salt = couch_uuids:random(), + DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations), + Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2), + Body1 = ?replace(Body0, ?ITERATIONS, Iterations), + Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey), + Body3 = ?replace(Body2, ?SALT, Salt), + Body4 = proplists:delete(?PASSWORD, Body3), + Doc#doc{body = {Body4}}; + {_ClearPassword, Scheme} -> + couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]), + throw({forbidden, ?PASSWORD_SERVER_ERROR}) end. % Validate if a new password matches all RegExp in the password_regexp setting. @@ -104,47 +107,52 @@ validate_password(ClearPassword) -> "[]" -> ok; ValidateConfig -> - RequirementList = case couch_util:parse_term(ValidateConfig) of - {ok, RegExpList} when is_list(RegExpList) -> - RegExpList; - {ok, NonListValue} -> - couch_log:error( - "[couch_httpd_auth] password_regexp value of '~p'" - " is not a list.", - [NonListValue] - ), - throw({forbidden, ?PASSWORD_SERVER_ERROR}); - {error, ErrorInfo} -> - couch_log:error( - "[couch_httpd_auth] password_regexp value of '~p'" - " could not get parsed. 
~p", - [ValidateConfig, ErrorInfo] - ), - throw({forbidden, ?PASSWORD_SERVER_ERROR}) - end, - % Check the password on every RegExp. - lists:foreach(fun(RegExpTuple) -> - case get_password_regexp_and_error_msg(RegExpTuple) of - {ok, RegExp, PasswordErrorMsg} -> - check_password(ClearPassword, RegExp, PasswordErrorMsg); - {error} -> + RequirementList = + case couch_util:parse_term(ValidateConfig) of + {ok, RegExpList} when is_list(RegExpList) -> + RegExpList; + {ok, NonListValue} -> + couch_log:error( + "[couch_httpd_auth] password_regexp value of '~p'" + " is not a list.", + [NonListValue] + ), + throw({forbidden, ?PASSWORD_SERVER_ERROR}); + {error, ErrorInfo} -> couch_log:error( - "[couch_httpd_auth] password_regexp part of '~p' " - "is not a RegExp string or " - "a RegExp and Reason tuple.", - [RegExpTuple] + "[couch_httpd_auth] password_regexp value of '~p'" + " could not get parsed. ~p", + [ValidateConfig, ErrorInfo] ), throw({forbidden, ?PASSWORD_SERVER_ERROR}) - end - end, RequirementList), + end, + % Check the password on every RegExp. + lists:foreach( + fun(RegExpTuple) -> + case get_password_regexp_and_error_msg(RegExpTuple) of + {ok, RegExp, PasswordErrorMsg} -> + check_password(ClearPassword, RegExp, PasswordErrorMsg); + {error} -> + couch_log:error( + "[couch_httpd_auth] password_regexp part of '~p' " + "is not a RegExp string or " + "a RegExp and Reason tuple.", + [RegExpTuple] + ), + throw({forbidden, ?PASSWORD_SERVER_ERROR}) + end + end, + RequirementList + ), ok end. % Get the RegExp out of the tuple and combine the the error message. % First is with a Reason string. -get_password_regexp_and_error_msg({RegExp, Reason}) - when is_list(RegExp) andalso is_list(Reason) - andalso length(Reason) > 0 -> +get_password_regexp_and_error_msg({RegExp, Reason}) when + is_list(RegExp) andalso is_list(Reason) andalso + length(Reason) > 0 +-> {ok, RegExp, lists:concat([?REQUIREMENT_ERROR, " ", Reason])}; % With a not correct Reason string. get_password_regexp_and_error_msg({RegExp, _Reason}) when is_list(RegExp) -> @@ -181,36 +189,40 @@ check_password(Password, RegExp, ErrorMsg) -> % -> return doc after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) -> case (catch couch_db:check_is_admin(Db)) of - ok -> - Doc; - _ -> - throw({forbidden, - <<"Only administrators can view design docs in the users database.">>}) + ok -> + Doc; + _ -> + throw( + {forbidden, <<"Only administrators can view design docs in the users database.">>} + ) end; after_doc_read(Doc, Db) -> - #user_ctx{name=Name} = couch_db:get_user_ctx(Db), + #user_ctx{name = Name} = couch_db:get_user_ctx(Db), DocName = get_doc_name(Doc), case (catch couch_db:check_is_admin(Db)) of - ok -> - Doc; - _ when Name =:= DocName -> - Doc; - _ -> - Doc1 = strip_non_public_fields(Doc), - case Doc1 of - #doc{body={[]}} -> - throw(not_found); - _ -> - Doc1 - end + ok -> + Doc; + _ when Name =:= DocName -> + Doc; + _ -> + Doc1 = strip_non_public_fields(Doc), + case Doc1 of + #doc{body = {[]}} -> + throw(not_found); + _ -> + Doc1 + end end. -get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) -> +get_doc_name(#doc{id = <<"org.couchdb.user:", Name/binary>>}) -> Name; get_doc_name(_) -> undefined. -strip_non_public_fields(#doc{body={Props}}=Doc) -> - Public = re:split(chttpd_util:get_chttpd_auth_config("public_fields", ""), - "\\s*,\\s*", [{return, binary}]), - Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}. 
+strip_non_public_fields(#doc{body = {Props}} = Doc) -> + Public = re:split( + chttpd_util:get_chttpd_auth_config("public_fields", ""), + "\\s*,\\s*", + [{return, binary}] + ), + Doc#doc{body = {[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}. diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl index c7dc894d7..b7a6ad39a 100644 --- a/src/couch/src/couch_util.erl +++ b/src/couch/src/couch_util.erl @@ -15,14 +15,14 @@ -export([priv_dir/0, normpath/1, fold_files/5]). -export([should_flush/0, should_flush/1, to_existing_atom/1]). -export([rand32/0, implode/2]). --export([abs_pathname/1,abs_pathname/2, trim/1, drop_dot_couch_ext/1]). +-export([abs_pathname/1, abs_pathname/2, trim/1, drop_dot_couch_ext/1]). -export([encodeBase64Url/1, decodeBase64Url/1]). -export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]). -export([get_nested_json_value/2, json_user_ctx/1]). -export([proplist_apply_field/2, json_apply_field/2]). -export([to_binary/1, to_integer/1, to_list/1, url_encode/1]). -export([json_encode/1, json_decode/1, json_decode/2]). --export([verify/2,simple_call/2,shutdown_sync/1]). +-export([verify/2, simple_call/2, shutdown_sync/1]). -export([get_value/2, get_value/3]). -export([reorder_results/2]). -export([url_strip_password/1]). @@ -60,7 +60,6 @@ <<"feature_flags">> ]). - priv_dir() -> case code:priv_dir(couch) of {error, bad_name} -> @@ -68,7 +67,8 @@ priv_dir() -> % renaming src/couch to src/couch. Not really worth the hassle. % -Damien code:priv_dir(couchdb); - Dir -> Dir + Dir -> + Dir end. % Normalize a pathname by removing .. and . components. @@ -84,7 +84,6 @@ normparts(["." | RestParts], Acc) -> normparts([Part | RestParts], Acc) -> normparts(RestParts, [Part | Acc]). - % This is implementation is similar the builtin filelib:fold_files/5 % except that this version will run the user supplied function % on directories that match the regular expression as well. @@ -125,13 +124,21 @@ fold_files2([File | Rest], Dir, RegExp, Recursive, Fun, Acc0) -> % works like list_to_existing_atom, except can be list or binary and it % gives you the original value instead of an error if no existing atom. to_existing_atom(V) when is_list(V) -> - try list_to_existing_atom(V) catch _:_ -> V end; + try + list_to_existing_atom(V) + catch + _:_ -> V + end; to_existing_atom(V) when is_binary(V) -> - try list_to_existing_atom(?b2l(V)) catch _:_ -> V end; + try + list_to_existing_atom(?b2l(V)) + catch + _:_ -> V + end; to_existing_atom(V) when is_atom(V) -> V. -shutdown_sync(Pid) when not is_pid(Pid)-> +shutdown_sync(Pid) when not is_pid(Pid) -> ok; shutdown_sync(Pid) -> MRef = erlang:monitor(process, Pid), @@ -139,23 +146,22 @@ shutdown_sync(Pid) -> catch unlink(Pid), catch exit(Pid, shutdown), receive - {'DOWN', MRef, _, _, _} -> - ok + {'DOWN', MRef, _, _, _} -> + ok end after erlang:demonitor(MRef, [flush]) end. - simple_call(Pid, Message) -> MRef = erlang:monitor(process, Pid), try Pid ! 
{self(), Message}, receive - {Pid, Result} -> - Result; - {'DOWN', MRef, _, _, Reason} -> - exit(Reason) + {Pid, Result} -> + Result; + {'DOWN', MRef, _, _, Reason} -> + exit(Reason) end after erlang:demonitor(MRef, [flush]) @@ -171,28 +177,40 @@ validate_utf8_fast(B, O) -> <<_:O/binary>> -> true; <<_:O/binary, C1, _/binary>> when - C1 < 128 -> + C1 < 128 + -> validate_utf8_fast(B, 1 + O); <<_:O/binary, C1, C2, _/binary>> when - C1 >= 194, C1 =< 223, - C2 >= 128, C2 =< 191 -> + C1 >= 194, + C1 =< 223, + C2 >= 128, + C2 =< 191 + -> validate_utf8_fast(B, 2 + O); <<_:O/binary, C1, C2, C3, _/binary>> when - C1 >= 224, C1 =< 239, - C2 >= 128, C2 =< 191, - C3 >= 128, C3 =< 191 -> + C1 >= 224, + C1 =< 239, + C2 >= 128, + C2 =< 191, + C3 >= 128, + C3 =< 191 + -> validate_utf8_fast(B, 3 + O); <<_:O/binary, C1, C2, C3, C4, _/binary>> when - C1 >= 240, C1 =< 244, - C2 >= 128, C2 =< 191, - C3 >= 128, C3 =< 191, - C4 >= 128, C4 =< 191 -> + C1 >= 240, + C1 =< 244, + C2 >= 128, + C2 =< 191, + C3 >= 128, + C3 =< 191, + C4 >= 128, + C4 =< 191 + -> validate_utf8_fast(B, 4 + O); _ -> false end. - to_hex(<<Hi:4, Lo:4, Rest/binary>>) -> [nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex(Rest)]; to_hex(<<>>) -> @@ -217,7 +235,6 @@ nibble_to_hex(13) -> $d; nibble_to_hex(14) -> $e; nibble_to_hex(15) -> $f. - parse_term(Bin) when is_binary(Bin) -> parse_term(binary_to_list(Bin)); parse_term(List) -> @@ -229,16 +246,16 @@ get_value(Key, List) -> get_value(Key, List, Default) -> case lists:keysearch(Key, 1, List) of - {value, {Key,Value}} -> - Value; - false -> - Default + {value, {Key, Value}} -> + Value; + false -> + Default end. -get_nested_json_value({Props}, [Key|Keys]) -> +get_nested_json_value({Props}, [Key | Keys]) -> case couch_util:get_value(Key, Props, nil) of - nil -> throw({not_found, <<"missing json key: ", Key/binary>>}); - Value -> get_nested_json_value(Value, Keys) + nil -> throw({not_found, <<"missing json key: ", Key/binary>>}); + Value -> get_nested_json_value(Value, Keys) end; get_nested_json_value(Value, []) -> Value; @@ -256,15 +273,16 @@ json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) -> json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) -> json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]); json_apply_field({Key, NewValue}, [], Acc) -> - {[{Key, NewValue}|Acc]}. + {[{Key, NewValue} | Acc]}. json_user_ctx(Db) -> ShardName = couch_db:name(Db), Ctx = couch_db:get_user_ctx(Db), - {[{<<"db">>, mem3:dbname(ShardName)}, - {<<"name">>,Ctx#user_ctx.name}, - {<<"roles">>,Ctx#user_ctx.roles}]}. - + {[ + {<<"db">>, mem3:dbname(ShardName)}, + {<<"name">>, Ctx#user_ctx.name}, + {<<"roles">>, Ctx#user_ctx.roles} + ]}. % returns a random integer rand32() -> @@ -276,7 +294,7 @@ rand32() -> abs_pathname(" " ++ Filename) -> % strip leading whitspace abs_pathname(Filename); -abs_pathname([$/ |_]=Filename) -> +abs_pathname([$/ | _] = Filename) -> Filename; abs_pathname(Filename) -> {ok, Cwd} = file:get_cwd(), @@ -287,24 +305,25 @@ abs_pathname(Filename, Dir) -> Name = filename:absname(Filename, Dir ++ "/"), OutFilename = filename:join(fix_path_list(filename:split(Name), [])), % If the filename is a dir (last char slash, put back end slash - case string:right(Filename,1) of - "/" -> - OutFilename ++ "/"; - "\\" -> - OutFilename ++ "/"; - _Else-> - OutFilename + case string:right(Filename, 1) of + "/" -> + OutFilename ++ "/"; + "\\" -> + OutFilename ++ "/"; + _Else -> + OutFilename end. 
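The clauses of `validate_utf8_fast/2` above encode the standard UTF-8 byte-range table: lead bytes 194–223 start 2-byte sequences, 224–239 start 3-byte, 240–244 start 4-byte, and every continuation byte must fall in 128–191; the 194 lower bound is what rejects overlong 2-byte forms. Two quick checks that follow directly from those ranges:

```
true  = couch_util:validate_utf8(<<16#E2, 16#82, 16#AC>>), %% U+20AC, valid 3-byte
false = couch_util:validate_utf8(<<16#C0, 16#80>>).        %% overlong NUL: 16#C0 < 194
```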
% if this as an executable with arguments, seperate out the arguments % ""./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"} separate_cmd_args("", CmdAcc) -> {lists:reverse(CmdAcc), ""}; -separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value +% handle skipped value +separate_cmd_args("\\ " ++ Rest, CmdAcc) -> separate_cmd_args(Rest, " \\" ++ CmdAcc); separate_cmd_args(" " ++ Rest, CmdAcc) -> {lists:reverse(CmdAcc), " " ++ Rest}; -separate_cmd_args([Char|Rest], CmdAcc) -> +separate_cmd_args([Char | Rest], CmdAcc) -> separate_cmd_args(Rest, [Char | CmdAcc]). % Is a character whitespace (from https://en.wikipedia.org/wiki/Whitespace_character#Unicode)? @@ -341,7 +360,6 @@ is_whitespace(8288) -> true; is_whitespace(65279) -> true; is_whitespace(_Else) -> false. - % removes leading and trailing whitespace from a string trim(String) when is_binary(String) -> % mirror string:trim() behaviour of returning a binary when a binary is passed in @@ -350,7 +368,6 @@ trim(String) -> String2 = lists:dropwhile(fun is_whitespace/1, String), lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))). - drop_dot_couch_ext(DbName) when is_binary(DbName) -> PrefixLen = size(DbName) - 6, case DbName of @@ -359,48 +376,53 @@ drop_dot_couch_ext(DbName) when is_binary(DbName) -> Else -> Else end; - drop_dot_couch_ext(DbName) when is_list(DbName) -> binary_to_list(drop_dot_couch_ext(iolist_to_binary(DbName))). - % takes a heirarchical list of dirs and removes the dots ".", double dots % ".." and the corresponding parent dirs. fix_path_list([], Acc) -> lists:reverse(Acc); -fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) -> +fix_path_list([".." | Rest], [_PrevAcc | RestAcc]) -> fix_path_list(Rest, RestAcc); -fix_path_list(["."|Rest], Acc) -> +fix_path_list(["." | Rest], Acc) -> fix_path_list(Rest, Acc); fix_path_list([Dir | Rest], Acc) -> fix_path_list(Rest, [Dir | Acc]). - implode(List, Sep) -> implode(List, Sep, []). implode([], _Sep, Acc) -> lists:flatten(lists:reverse(Acc)); implode([H], Sep, Acc) -> - implode([], Sep, [H|Acc]); -implode([H|T], Sep, Acc) -> - implode(T, Sep, [Sep,H|Acc]). - + implode([], Sep, [H | Acc]); +implode([H | T], Sep, Acc) -> + implode(T, Sep, [Sep, H | Acc]). should_flush() -> should_flush(?FLUSH_MAX_MEM). should_flush(MemThreshHold) -> {memory, ProcMem} = process_info(self(), memory), - BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end, - 0, element(2,process_info(self(), binary))), - if ProcMem+BinMem > 2*MemThreshHold -> - garbage_collect(), - {memory, ProcMem2} = process_info(self(), memory), - BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end, - 0, element(2,process_info(self(), binary))), - ProcMem2+BinMem2 > MemThreshHold; - true -> false end. + BinMem = lists:foldl( + fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end, + 0, + element(2, process_info(self(), binary)) + ), + if + ProcMem + BinMem > 2 * MemThreshHold -> + garbage_collect(), + {memory, ProcMem2} = process_info(self(), memory), + BinMem2 = lists:foldl( + fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end, + 0, + element(2, process_info(self(), binary)) + ), + ProcMem2 + BinMem2 > MemThreshHold; + true -> + false + end. encodeBase64Url(Url) -> b64url:encode(Url). @@ -410,10 +432,10 @@ decodeBase64Url(Url64) -> dict_find(Key, Dict, DefaultValue) -> case dict:find(Key, Dict) of - {ok, Value} -> - Value; - error -> - DefaultValue + {ok, Value} -> + Value; + error -> + DefaultValue end. 
to_binary(V) when is_binary(V) -> @@ -448,23 +470,23 @@ to_list(V) -> url_encode(Bin) when is_binary(Bin) -> url_encode(binary_to_list(Bin)); -url_encode([H|T]) -> +url_encode([H | T]) -> if - H >= $a, $z >= H -> - [H|url_encode(T)]; - H >= $A, $Z >= H -> - [H|url_encode(T)]; - H >= $0, $9 >= H -> - [H|url_encode(T)]; - H == $_; H == $.; H == $-; H == $: -> - [H|url_encode(T)]; - true -> - case lists:flatten(io_lib:format("~.16.0B", [H])) of - [X, Y] -> - [$%, X, Y | url_encode(T)]; - [X] -> - [$%, $0, X | url_encode(T)] - end + H >= $a, $z >= H -> + [H | url_encode(T)]; + H >= $A, $Z >= H -> + [H | url_encode(T)]; + H >= $0, $9 >= H -> + [H | url_encode(T)]; + H == $_; H == $.; H == $-; H == $: -> + [H | url_encode(T)]; + true -> + case lists:flatten(io_lib:format("~.16.0B", [H])) of + [X, Y] -> + [$%, X, Y | url_encode(T)]; + [X] -> + [$%, $0, X | url_encode(T)] + end end; url_encode([]) -> []. @@ -483,7 +505,7 @@ json_decode(V, Opts) -> throw({invalid_json, Error}) end. -verify([X|RestX], [Y|RestY], Result) -> +verify([X | RestX], [Y | RestY], Result) -> verify(RestX, RestY, (X bxor Y) bor Result); verify([], [], Result) -> Result == 0. @@ -497,7 +519,8 @@ verify(X, Y) when is_list(X) and is_list(Y) -> false -> false end; -verify(_X, _Y) -> false. +verify(_X, _Y) -> + false. % linear search is faster for small lists, length() is 0.5 ms for 100k list reorder_results(Keys, SortedResults) when length(Keys) < 100 -> @@ -507,10 +530,12 @@ reorder_results(Keys, SortedResults) -> [dict:fetch(Key, KeyDict) || Key <- Keys]. url_strip_password(Url) -> - re:replace(Url, + re:replace( + Url, "(http|https|socks5)://([^:]+):[^@]+@(.*)$", "\\1://\\2:*****@\\3", - [{return, list}]). + [{return, list}] + ). encode_doc_id(#doc{id = Id}) -> encode_doc_id(Id); @@ -528,7 +553,7 @@ normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) -> normalize_ddoc_id(DDocId) when is_binary(DDocId) -> <<"_design/", DDocId/binary>>. -with_db(DbName, Fun) when is_binary(DbName) -> +with_db(DbName, Fun) when is_binary(DbName) -> case couch_db:open_int(DbName, [?ADMIN_CTX]) of {ok, Db} -> try @@ -548,20 +573,26 @@ with_db(Db, Fun) -> end. rfc1123_date() -> - {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(), - DayNumber = calendar:day_of_the_week({YYYY,MM,DD}), + {{YYYY, MM, DD}, {Hour, Min, Sec}} = calendar:universal_time(), + DayNumber = calendar:day_of_the_week({YYYY, MM, DD}), lists:flatten( - io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT", - [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])). + io_lib:format( + "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT", + [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec] + ) + ). rfc1123_date(undefined) -> undefined; rfc1123_date(UniversalTime) -> - {{YYYY,MM,DD},{Hour,Min,Sec}} = UniversalTime, - DayNumber = calendar:day_of_the_week({YYYY,MM,DD}), + {{YYYY, MM, DD}, {Hour, Min, Sec}} = UniversalTime, + DayNumber = calendar:day_of_the_week({YYYY, MM, DD}), lists:flatten( - io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT", - [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])). + io_lib:format( + "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT", + [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec] + ) + ). %% day @@ -598,30 +629,32 @@ boolean_to_integer(true) -> boolean_to_integer(false) -> 0. 
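`verify/2` above is a constant-time comparison: each pair of elements is XORed and the results are ORed into an accumulator, so a mismatch never short-circuits and response timing does not leak the position of the first differing byte. Length mismatches are rejected before the element-wise pass. For example:

```
true  = couch_util:verify("secret", "secret"),
false = couch_util:verify("secret", "secreT"),
false = couch_util:verify("secret", "secrets").  %% lengths differ: no scan at all
```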
- validate_positive_int(N) when is_list(N) -> try I = list_to_integer(N), validate_positive_int(I) - catch error:badarg -> - false + catch + error:badarg -> + false end; validate_positive_int(N) when is_integer(N), N > 0 -> true; -validate_positive_int(_) -> false. - +validate_positive_int(_) -> + false. find_in_binary(_B, <<>>) -> not_found; - find_in_binary(B, Data) -> case binary:match(Data, [B], []) of - nomatch -> - MatchLength = erlang:min(byte_size(B), byte_size(Data)), - match_prefix_at_end(binary:part(B, {0, MatchLength}), - binary:part(Data, {byte_size(Data), -MatchLength}), - MatchLength, byte_size(Data) - MatchLength); - {Pos, _Len} -> - {exact, Pos} + nomatch -> + MatchLength = erlang:min(byte_size(B), byte_size(Data)), + match_prefix_at_end( + binary:part(B, {0, MatchLength}), + binary:part(Data, {byte_size(Data), -MatchLength}), + MatchLength, + byte_size(Data) - MatchLength + ); + {Pos, _Len} -> + {exact, Pos} end. match_prefix_at_end(Prefix, Data, PrefixLength, N) -> @@ -630,10 +663,14 @@ match_prefix_at_end(Prefix, Data, PrefixLength, N) -> match_rest_of_prefix([], _Prefix, _Data, _PrefixLength, _N) -> not_found; - match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) -> - case binary:match(binary:part(Data, {PrefixLength, Pos - PrefixLength}), - [binary:part(Prefix, {0, PrefixLength - Pos})], []) of + case + binary:match( + binary:part(Data, {PrefixLength, Pos - PrefixLength}), + [binary:part(Prefix, {0, PrefixLength - Pos})], + [] + ) + of nomatch -> match_rest_of_prefix(Rest, Prefix, Data, PrefixLength, N); {_Pos, _Len1} -> @@ -642,44 +679,42 @@ match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) -> callback_exists(Module, Function, Arity) -> case ensure_loaded(Module) of - true -> - InfoList = Module:module_info(exports), - lists:member({Function, Arity}, InfoList); - false -> - false + true -> + InfoList = Module:module_info(exports), + lists:member({Function, Arity}, InfoList); + false -> + false end. validate_callback_exists(Module, Function, Arity) -> case callback_exists(Module, Function, Arity) of - true -> - ok; - false -> - CallbackStr = lists:flatten( - io_lib:format("~w:~w/~w", [Module, Function, Arity])), - throw({error, - {undefined_callback, CallbackStr, {Module, Function, Arity}}}) + true -> + ok; + false -> + CallbackStr = lists:flatten( + io_lib:format("~w:~w/~w", [Module, Function, Arity]) + ), + throw({error, {undefined_callback, CallbackStr, {Module, Function, Arity}}}) end. - check_md5(_NewSig, <<>>) -> ok; check_md5(Sig, Sig) -> ok; check_md5(_, _) -> throw(md5_mismatch). - set_mqd_off_heap(Module) -> case config:get_boolean("off_heap_mqd", atom_to_list(Module), true) of true -> try erlang:process_flag(message_queue_data, off_heap), ok - catch error:badarg -> + catch + error:badarg -> ok end; false -> ok end. - set_process_priority(Module, Level) -> case config:get_boolean("process_priority", atom_to_list(Module), false) of true -> @@ -689,18 +724,17 @@ set_process_priority(Module, Level) -> ok end. - ensure_loaded(Module) when is_atom(Module) -> case code:ensure_loaded(Module) of - {module, Module} -> - true; - {error, embedded} -> - true; - {error, _} -> - false + {module, Module} -> + true; + {error, embedded} -> + true; + {error, _} -> + false end; -ensure_loaded(_Module) -> false. - +ensure_loaded(_Module) -> + false. %% This is especially useful in gen_servers when you need to call %% a function that does a receive as it would hijack incoming messages. 
@@ -718,11 +752,9 @@ with_proc(M, F, A, Timeout) -> {error, timeout} end. - process_dict_get(Pid, Key) -> process_dict_get(Pid, Key, undefined). - process_dict_get(Pid, Key, DefaultValue) -> case process_info(Pid, dictionary) of {dictionary, Dict} -> @@ -736,21 +768,18 @@ process_dict_get(Pid, Key, DefaultValue) -> DefaultValue end. - unique_monotonic_integer() -> erlang:unique_integer([monotonic, positive]). - check_config_blacklist(Section) -> case lists:member(Section, ?BLACKLIST_CONFIG_SECTIONS) of - true -> - Msg = <<"Config section blacklisted for modification over HTTP API.">>, - throw({forbidden, Msg}); - _ -> - ok + true -> + Msg = <<"Config section blacklisted for modification over HTTP API.">>, + throw({forbidden, Msg}); + _ -> + ok end. - -ifdef(OTP_RELEASE). -if(?OTP_RELEASE >= 22). @@ -765,7 +794,8 @@ hmac(Alg, Key, Message) -> hmac(Alg, Key, Message) -> crypto:hmac(Alg, Key, Message). --endif. % -if(?OTP_RELEASE >= 22) +% -if(?OTP_RELEASE >= 22) +-endif. -else. @@ -773,4 +803,5 @@ hmac(Alg, Key, Message) -> hmac(Alg, Key, Message) -> crypto:hmac(Alg, Key, Message). --endif. % -ifdef(OTP_RELEASE) +% -ifdef(OTP_RELEASE) +-endif. diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl index 3fffd04b3..be6089dff 100644 --- a/src/couch/src/couch_uuids.erl +++ b/src/couch/src/couch_uuids.erl @@ -127,23 +127,22 @@ utc_random(ClockSeq) -> utc_suffix(Suffix, ClockSeq, Now) -> OsMicros = micros_since_epoch(Now), - NewClockSeq = if - OsMicros =< ClockSeq -> - % Timestamp is lagging, use ClockSeq as Timestamp - ClockSeq + 1; - OsMicros > ClockSeq -> - % Timestamp advanced, use it, and reset ClockSeq with it - OsMicros - end, + NewClockSeq = + if + OsMicros =< ClockSeq -> + % Timestamp is lagging, use ClockSeq as Timestamp + ClockSeq + 1; + OsMicros > ClockSeq -> + % Timestamp advanced, use it, and reset ClockSeq with it + OsMicros + end, Prefix = io_lib:format("~14.16.0b", [NewClockSeq]), {list_to_binary(Prefix ++ Suffix), NewClockSeq}. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). - utc_id_time_does_not_advance_test() -> % Timestamp didn't advance but local clock sequence should and new UUIds % should be generated @@ -156,7 +155,6 @@ utc_id_time_does_not_advance_test() -> ?assertNotEqual(UtcId0, UtcId1), ?assertEqual(ClockSeq1 + 1, ClockSeq2). - utc_id_time_advanced_test() -> % Timestamp advanced, a new UUID generated and also the last clock sequence % is updated to that timestamp. @@ -187,5 +185,4 @@ utc_random_test_time_advance_test() -> ?assertEqual(32, byte_size(UtcRandom)), ?assert(NextClockSeq > micros_since_epoch({1000, 0, 0})). - -endif. diff --git a/src/couch/src/couch_work_queue.erl b/src/couch/src/couch_work_queue.erl index 5d747de82..d767a33be 100644 --- a/src/couch/src/couch_work_queue.erl +++ b/src/couch/src/couch_work_queue.erl @@ -35,21 +35,17 @@ multi_workers = false }). - new(Options) -> gen_server:start_link(couch_work_queue, Options, []). - queue(Wq, Item) when is_binary(Item) -> gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity); queue(Wq, Item) -> gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity). - dequeue(Wq) -> dequeue(Wq, all). - dequeue(Wq, MaxItems) -> try gen_server:call(Wq, {dequeue, MaxItems}, infinity) @@ -57,7 +53,6 @@ dequeue(Wq, MaxItems) -> _:_ -> closed end. - item_count(Wq) -> try gen_server:call(Wq, item_count, infinity) @@ -65,7 +60,6 @@ item_count(Wq) -> _:_ -> closed end. - size(Wq) -> try gen_server:call(Wq, size, infinity) @@ -73,10 +67,8 @@ size(Wq) -> _:_ -> closed end. 
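Stepping back to the couch_uuids hunk above: `utc_suffix/3` keeps the 14-hex-digit time prefix strictly increasing even when `os:timestamp/0` stalls or steps backwards, because a lagging clock just bumps the previous sequence by one. The two branches in isolation, with hypothetical numbers:

```
%% Illustrative-only restatement of the NewClockSeq choice above.
next_seq(OsMicros, ClockSeq) when OsMicros =< ClockSeq ->
    ClockSeq + 1;   %% clock lagging: keep advancing past the old sequence
next_seq(OsMicros, _ClockSeq) ->
    OsMicros.       %% clock advanced: adopt it and reset the sequence
%% next_seq(100, 100) =:= 101,  next_seq(250, 101) =:= 250.
```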
- close(Wq) -> gen_server:cast(Wq, close). - init(Options) -> Q = #q{ @@ -86,50 +78,47 @@ init(Options) -> }, {ok, Q, hibernate}. - -terminate(_Reason, #q{work_waiters=Workers}) -> +terminate(_Reason, #q{work_waiters = Workers}) -> lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers). - handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) -> - Q = Q0#q{size = Q0#q.size + Size, - items = Q0#q.items + 1, - queue = queue:in({Item, Size}, Q0#q.queue)}, - case (Q#q.size >= Q#q.max_size) orelse - (Q#q.items >= Q#q.max_items) of - true -> - {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate}; - false -> - {reply, ok, Q, hibernate} + Q = Q0#q{ + size = Q0#q.size + Size, + items = Q0#q.items + 1, + queue = queue:in({Item, Size}, Q0#q.queue) + }, + case + (Q#q.size >= Q#q.max_size) orelse + (Q#q.items >= Q#q.max_items) + of + true -> + {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate}; + false -> + {reply, ok, Q, hibernate} end; - handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) -> gen_server:reply(W, {ok, [Item]}), {reply, ok, Q#q{work_waiters = Rest}, hibernate}; - handle_call({dequeue, Max}, From, Q) -> #q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q, case {Workers, Multi} of - {[_ | _], false} -> - exit("Only one caller allowed to wait for this work at a time"); - {[_ | _], true} -> - {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}}; - _ -> - case Count of - 0 -> - {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}}; - C when C > 0 -> - deliver_queue_items(Max, Q) - end + {[_ | _], false} -> + exit("Only one caller allowed to wait for this work at a time"); + {[_ | _], true} -> + {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}}; + _ -> + case Count of + 0 -> + {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}}; + C when C > 0 -> + deliver_queue_items(Max, Q) + end end; - handle_call(item_count, _From, Q) -> {reply, Q#q.items, Q}; - handle_call(size, _From, Q) -> {reply, Q#q.size, Q}. - deliver_queue_items(Max, Q) -> #q{ queue = Queue, @@ -139,48 +128,45 @@ deliver_queue_items(Max, Q) -> blocked = Blocked } = Q, case (Max =:= all) orelse (Max >= Count) of - false -> - {Items, Size2, Queue2, Blocked2} = dequeue_items( - Max, Size, Queue, Blocked, []), - Q2 = Q#q{ - items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2 - }, - {reply, {ok, Items}, Q2}; - true -> - lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked), - Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()}, - Items = [Item || {Item, _} <- queue:to_list(Queue)], - case Close of false -> + {Items, Size2, Queue2, Blocked2} = dequeue_items( + Max, Size, Queue, Blocked, [] + ), + Q2 = Q#q{ + items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2 + }, {reply, {ok, Items}, Q2}; true -> - {stop, normal, {ok, Items}, Q2} - end + lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked), + Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()}, + Items = [Item || {Item, _} <- queue:to_list(Queue)], + case Close of + false -> + {reply, {ok, Items}, Q2}; + true -> + {stop, normal, {ok, Items}, Q2} + end end. 
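For reference while reading the couch_work_queue clauses above: a producer's `gen_server:call` reply is deferred once the queue passes `max_items`/`max_size` (so `queue/2` blocks until a consumer drains it), and consumers either take queued items or park in `work_waiters`. A usage sketch, assuming `new/1` takes the conventional `{max_size, _}`/`{max_items, _}` options (their exact keys are not visible in this hunk):

```
{ok, Q} = couch_work_queue:new([{max_size, 16#4000}, {max_items, 100}]),
ok = couch_work_queue:queue(Q, {doc, <<"payload">>}),
{ok, Items} = couch_work_queue:dequeue(Q),  %% Items =:= [{doc, <<"payload">>}]
couch_work_queue:close(Q).
```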
- dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) -> {lists:reverse(DequeuedAcc), Size, Queue, Blocked}; - dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) -> {{value, {Item, ItemSize}}, Queue2} = queue:out(Queue), case Blocked of - [] -> - Blocked2 = Blocked; - [From | Blocked2] -> - gen_server:reply(From, ok) + [] -> + Blocked2 = Blocked; + [From | Blocked2] -> + gen_server:reply(From, ok) end, dequeue_items( - NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]). - + NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc] + ). handle_cast(close, #q{items = 0} = Q) -> {stop, normal, Q}; - handle_cast(close, Q) -> {noreply, Q#q{close_on_dequeue = true}}. - code_change(_OldVsn, State, _Extra) -> {ok, State}. diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl index 48f49bda6..d7364012f 100644 --- a/src/couch/src/test_request.erl +++ b/src/couch/src/test_request.erl @@ -74,7 +74,6 @@ options(Url, Headers) -> options(Url, Headers, Opts) -> request(options, Url, Headers, [], Opts). - request(Method, Url, Headers) -> request(Method, Url, Headers, []). diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl index 125e76492..4b802bc49 100644 --- a/src/couch/src/test_util.erl +++ b/src/couch/src/test_util.erl @@ -40,8 +40,7 @@ -record(test_context, {mocked = [], started = [], module}). --define(DEFAULT_APPS, - [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]). +-define(DEFAULT_APPS, [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]). srcdir() -> code:priv_dir(couch) ++ "/../../". @@ -57,9 +56,12 @@ init_code_path() -> "mochiweb", "snappy" ], - lists:foreach(fun(Name) -> - code:add_patha(filename:join([builddir(), "src", Name])) - end, Paths). + lists:foreach( + fun(Name) -> + code:add_patha(filename:join([builddir(), "src", Name])) + end, + Paths + ). source_file(Name) -> filename:join([srcdir(), Name]). @@ -94,21 +96,21 @@ start_applications(Apps) -> start_applications([], Acc) -> lists:reverse(Acc); -start_applications([App|Apps], Acc) when App == kernel; App == stdlib -> +start_applications([App | Apps], Acc) when App == kernel; App == stdlib -> start_applications(Apps, Acc); -start_applications([App|Apps], Acc) -> +start_applications([App | Apps], Acc) -> case application:start(App) of - {error, {already_started, crypto}} -> - start_applications(Apps, [crypto | Acc]); - {error, {already_started, App}} -> - io:format(standard_error, "Application ~s was left running!~n", [App]), - application:stop(App), - start_applications([App|Apps], Acc); - {error, Reason} -> - io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]), - throw({error, {cannot_start, App, Reason}}); - ok -> - start_applications(Apps, [App|Acc]) + {error, {already_started, crypto}} -> + start_applications(Apps, [crypto | Acc]); + {error, {already_started, App}} -> + io:format(standard_error, "Application ~s was left running!~n", [App]), + application:stop(App), + start_applications([App | Apps], Acc); + {error, Reason} -> + io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]), + throw({error, {cannot_start, App, Reason}}); + ok -> + start_applications(Apps, [App | Acc]) end. stop_applications(Apps) -> @@ -119,12 +121,11 @@ start_config(Chain) -> case config:start_link(Chain) of {ok, Pid} -> {ok, Pid}; - {error, {already_started, OldPid}} -> + {error, {already_started, OldPid}} -> ok = stop_config(OldPid), start_config(Chain) end. 
- stop_config(Pid) -> Timeout = 1000, case stop_sync(Pid, fun() -> config:stop() end, Timeout) of @@ -150,8 +151,8 @@ stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) -> catch unlink(Pid), Res = (catch Fun()), receive - {'DOWN', MRef, _, _, _} -> - Res + {'DOWN', MRef, _, _, _} -> + Res after Timeout -> timeout end @@ -159,7 +160,8 @@ stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) -> after erlang:demonitor(MRef, [flush]) end; -stop_sync(_, _, _) -> error(badarg). +stop_sync(_, _, _) -> + error(badarg). stop_sync_throw(Name, Error) -> stop_sync_throw(Name, shutdown, Error). @@ -176,7 +178,8 @@ stop_sync_throw(Pid, Fun, Error, Timeout) -> with_process_restart(Name) -> {Pid, true} = with_process_restart( - Name, fun() -> exit(whereis(Name), shutdown) end), + Name, fun() -> exit(whereis(Name), shutdown) end + ), Pid. with_process_restart(Name, Fun) -> @@ -185,24 +188,26 @@ with_process_restart(Name, Fun) -> with_process_restart(Name, Fun, Timeout) -> Res = stop_sync(Name, Fun), case wait_process(Name, Timeout) of - timeout -> - timeout; - Pid -> - {Pid, Res} + timeout -> + timeout; + Pid -> + {Pid, Res} end. - wait_process(Name) -> wait_process(Name, 5000). wait_process(Name, Timeout) -> - wait(fun() -> - case whereis(Name) of - undefined -> - wait; - Pid -> - Pid - end - end, Timeout). + wait( + fun() -> + case whereis(Name) of + undefined -> + wait; + Pid -> + Pid + end + end, + Timeout + ). wait(Fun) -> wait(Fun, 5000, 50). @@ -218,11 +223,11 @@ wait(_Fun, Timeout, _Delay, Started, Prev) when Prev - Started > Timeout -> timeout; wait(Fun, Timeout, Delay, Started, _Prev) -> case Fun() of - wait -> - ok = timer:sleep(Delay), - wait(Fun, Timeout, Delay, Started, now_us()); - Else -> - Else + wait -> + ok = timer:sleep(Delay), + wait(Fun, Timeout, Delay, Started, now_us()); + Else -> + Else end. wait_value(Fun, Value) -> @@ -260,13 +265,17 @@ stop(#test_context{mocked = Mocked, started = Apps}) -> fake_db(Fields0) -> {ok, Db, Fields} = maybe_set_engine(Fields0), Indexes = lists:zip( - record_info(fields, db), - lists:seq(2, record_info(size, db)) - ), - lists:foldl(fun({FieldName, Value}, Acc) -> - Idx = couch_util:get_value(FieldName, Indexes), - setelement(Idx, Acc, Value) - end, Db, Fields). + record_info(fields, db), + lists:seq(2, record_info(size, db)) + ), + lists:foldl( + fun({FieldName, Value}, Acc) -> + Idx = couch_util:get_value(FieldName, Indexes), + setelement(Idx, Acc, Value) + end, + Db, + Fields + ). maybe_set_engine(Fields0) -> case lists:member(engine, Fields0) of @@ -279,11 +288,24 @@ maybe_set_engine(Fields0) -> end. get_engine_header(Fields) -> - Keys = [disk_version, update_seq, unused, id_tree_state, - seq_tree_state, local_tree_state, purge_seq, purged_docs, - security_ptr, revs_limit, uuid, epochs, compacted_seq], + Keys = [ + disk_version, + update_seq, + unused, + id_tree_state, + seq_tree_state, + local_tree_state, + purge_seq, + purged_docs, + security_ptr, + revs_limit, + uuid, + epochs, + compacted_seq + ], {HeadFields, RestFields} = lists:partition( - fun({K, _}) -> lists:member(K, Keys) end, Fields), + fun({K, _}) -> lists:member(K, Keys) end, Fields + ), Header0 = couch_bt_engine_header:new(), Header = couch_bt_engine_header:set(Header0, HeadFields), {ok, Header, RestFields}. @@ -315,7 +337,7 @@ load_applications_with_stats() -> ok. stats_file_to_app(File) -> - [_Desc, _Priv, App|_] = lists:reverse(filename:split(File)), + [_Desc, _Priv, App | _] = lists:reverse(filename:split(File)), erlang:list_to_atom(App). 
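One helper being reindented above, `wait/1`, is a plain poll loop: the fun is retried every 50 ms until it returns anything other than the atom `wait`, or `timeout` once the 5-second budget is spent. Typical use (a sketch; the registered name is made up):

```
Pid = test_util:wait(fun() ->
    case whereis(my_registered_proc) of
        undefined -> wait;  %% keep polling
        P -> P              %% done: wait/1 returns P
    end
end).
```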
calculate_start_order(Apps) -> @@ -345,14 +367,19 @@ load_app_deps(App, StartOrder) -> {error, {already_loaded, App}} -> ok end, {ok, Apps} = application:get_key(App, applications), - Deps = case App of - kernel -> Apps; - stdlib -> Apps; - _ -> lists:usort([kernel, stdlib | Apps]) - end, - NewStartOrder = lists:foldl(fun(Dep, Acc) -> - load_app_deps(Dep, Acc) - end, StartOrder, Deps), + Deps = + case App of + kernel -> Apps; + stdlib -> Apps; + _ -> lists:usort([kernel, stdlib | Apps]) + end, + NewStartOrder = lists:foldl( + fun(Dep, Acc) -> + load_app_deps(Dep, Acc) + end, + StartOrder, + Deps + ), [App | NewStartOrder] end. diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl index 3c8586a14..63f67c243 100644 --- a/src/couch/test/eunit/chttpd_endpoints_tests.erl +++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl @@ -15,7 +15,6 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). - endpoints_test_() -> { "Checking dynamic endpoints", @@ -33,7 +32,6 @@ endpoints_test_() -> } }. - url_handlers() -> Handlers = [ {<<"">>, chttpd_misc, handle_welcome_req}, @@ -53,15 +51,17 @@ url_handlers() -> {<<"_cluster_setup">>, setup_httpd, handle_setup_req} ], - lists:foreach(fun({Path, Mod, Fun}) -> - Handler = chttpd_handlers:url_handler(Path, undefined), - Expect = fun Mod:Fun/1, - ?assertEqual(Expect, Handler) - end, Handlers), + lists:foreach( + fun({Path, Mod, Fun}) -> + Handler = chttpd_handlers:url_handler(Path, undefined), + Expect = fun Mod:Fun/1, + ?assertEqual(Expect, Handler) + end, + Handlers + ), ?assertEqual(undefined, chttpd_handlers:url_handler("foo", undefined)). - db_handlers() -> Handlers = [ {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req}, @@ -75,15 +75,17 @@ db_handlers() -> {<<"_find">>, mango_httpd, handle_req} ], - lists:foreach(fun({Path, Mod, Fun}) -> - Handler = chttpd_handlers:db_handler(Path, undefined), - Expect = fun Mod:Fun/2, - ?assertEqual(Expect, Handler) - end, Handlers), + lists:foreach( + fun({Path, Mod, Fun}) -> + Handler = chttpd_handlers:db_handler(Path, undefined), + Expect = fun Mod:Fun/2, + ?assertEqual(Expect, Handler) + end, + Handlers + ), ?assertEqual(undefined, chttpd_handlers:db_handler("bam", undefined)). - design_handlers() -> Handlers = [ {<<"_view">>, chttpd_view, handle_view_req}, @@ -94,10 +96,13 @@ design_handlers() -> {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req} ], - lists:foreach(fun({Path, Mod, Fun}) -> - Handler = chttpd_handlers:design_handler(Path, undefined), - Expect = fun Mod:Fun/3, - ?assertEqual(Expect, Handler) - end, Handlers), + lists:foreach( + fun({Path, Mod, Fun}) -> + Handler = chttpd_handlers:design_handler(Path, undefined), + Expect = fun Mod:Fun/3, + ?assertEqual(Expect, Handler) + end, + Handlers + ), ?assertEqual(undefined, chttpd_handlers:design_handler("baz", undefined)). diff --git a/src/couch/test/eunit/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl index 71faf77d6..a4c31083a 100644 --- a/src/couch/test/eunit/couch_auth_cache_tests.erl +++ b/src/couch/test/eunit/couch_auth_cache_tests.erl @@ -21,27 +21,31 @@ start() -> test_util:start_couch([ioq]). - setup() -> DbName = ?tempdb(), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(DbName), false), + config:set( + "couch_httpd_auth", + "authentication_db", + ?b2l(DbName), + false + ), DbName. teardown(DbName) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]), ok. 
- couch_auth_cache_test_() -> { "CouchDB auth cache tests", { setup, - fun start/0, fun test_util:stop_couch/1, + fun start/0, + fun test_util:stop_couch/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_get_nil_on_missed_cache/1, fun should_get_right_password_hash/1, @@ -120,18 +124,18 @@ auth_vdu_test_() -> [missing, user, other] ]), AllPossibleCases = couch_tests_combinatorics:product( - [AllPossibleDocs, AllPossibleDocs]), + [AllPossibleDocs, AllPossibleDocs] + ), ?assertEqual([], AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]), { "Check User doc validation", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - [ - make_validate_test(Case) || Case <- Cases - ] - } + fun test_util:start_couch/0, + fun test_util:stop_couch/1, + [make_validate_test(Case) || Case <- Cases] + } }. should_get_nil_on_missed_cache(_) -> @@ -142,8 +146,10 @@ should_get_right_password_hash(DbName) -> PasswordHash = hash_password("pass1"), {ok, _} = update_user_doc(DbName, "joe", "pass1"), {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) + ?assertEqual( + PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds) + ) end). should_ensure_doc_hash_equals_cached_one(DbName) -> @@ -162,8 +168,10 @@ should_update_password(DbName) -> {ok, Rev} = update_user_doc(DbName, "joe", "pass1"), {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev), {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) + ?assertEqual( + PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds) + ) end). should_cleanup_cache_after_userdoc_deletion(DbName) -> @@ -183,15 +191,21 @@ should_restore_cache_after_userdoc_recreation(DbName) -> {ok, _} = update_user_doc(DbName, "joe", "pass5"), {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) + ?assertEqual( + PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds) + ) end). should_drop_cache_on_auth_db_change(DbName) -> ?_test(begin {ok, _} = update_user_doc(DbName, "joe", "pass1"), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(?tempdb()), false), + config:set( + "couch_httpd_auth", + "authentication_db", + ?b2l(?tempdb()), + false + ), ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")) end). @@ -202,17 +216,27 @@ should_restore_cache_on_auth_db_change(DbName) -> {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), DbName1 = ?tempdb(), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(DbName1), false), + config:set( + "couch_httpd_auth", + "authentication_db", + ?b2l(DbName1), + false + ), {ok, _} = update_user_doc(DbName1, "joe", "pass5"), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(DbName), false), + config:set( + "couch_httpd_auth", + "authentication_db", + ?b2l(DbName), + false + ), {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) + ?assertEqual( + PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds) + ) end). should_recover_cache_after_shutdown(DbName) -> @@ -225,7 +249,6 @@ should_recover_cache_after_shutdown(DbName) -> ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe")) end). 
- should_get_admin_from_config(_DbName) -> ?_test(begin config:set("admins", "testadmin", "password", false), @@ -245,17 +268,19 @@ update_user_doc(DbName, UserName, Password) -> update_user_doc(DbName, UserName, Password, Rev) -> ok = couch_auth_cache:ensure_users_db_exists(), User = iolist_to_binary(UserName), - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"org.couchdb.user:", User/binary>>}, - {<<"name">>, User}, - {<<"type">>, <<"user">>}, - {<<"salt">>, ?SALT}, - {<<"password_sha">>, hash_password(Password)}, - {<<"roles">>, []} - ] ++ case Rev of - nil -> []; - _ -> [{<<"_rev">>, Rev}] - end + Doc = couch_doc:from_json_obj({ + [ + {<<"_id">>, <<"org.couchdb.user:", User/binary>>}, + {<<"name">>, User}, + {<<"type">>, <<"user">>}, + {<<"salt">>, ?SALT}, + {<<"password_sha">>, hash_password(Password)}, + {<<"roles">>, []} + ] ++ + case Rev of + nil -> []; + _ -> [{<<"_rev">>, Rev}] + end }), {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []), @@ -275,13 +300,13 @@ get_doc_rev(DbName, UserName) -> DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), UpdateRev = - case couch_db:open_doc(AuthDb, DocId, []) of - {ok, Doc} -> - {Props} = couch_doc:to_json_obj(Doc, []), - couch_util:get_value(<<"_rev">>, Props); - {not_found, missing} -> - nil - end, + case couch_db:open_doc(AuthDb, DocId, []) of + {ok, Doc} -> + {Props} = couch_doc:to_json_obj(Doc, []), + couch_util:get_value(<<"_rev">>, Props); + {not_found, missing} -> + nil + end, ok = couch_db:close(AuthDb), {ok, UpdateRev}. @@ -298,15 +323,16 @@ delete_user_doc(DbName, UserName) -> {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []), {Props} = couch_doc:to_json_obj(Doc, []), - DeletedDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DocId}, - {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)}, - {<<"_deleted">>, true} - ]}), + DeletedDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DocId}, + {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)}, + {<<"_deleted">>, true} + ]} + ), {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []), ok = couch_db:close(AuthDb). - make_validate_test({Old, New, "ok"} = Case) -> {test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))}; make_validate_test({Old, New, Reason} = Case) -> @@ -314,19 +340,25 @@ make_validate_test({Old, New, Reason} = Case) -> {test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}. test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) -> - lists:flatten(io_lib:format( - "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"", - [OldRoles, OldType, NewRoles, NewType, Result])). + lists:flatten( + io_lib:format( + "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"", + [OldRoles, OldType, NewRoles, NewType, Result] + ) + ). doc([Roles, Type]) -> - couch_doc:from_json_obj({[ - {<<"_id">>,<<"org.couchdb.user:foo">>}, - {<<"_rev">>,<<"1-281c81adb1bf10927a6160f246dc0468">>}, - {<<"name">>,<<"foo">>}, - {<<"password_scheme">>,<<"simple">>}, - {<<"salt">>,<<"00000000000000000000000000000000">>}, - {<<"password_sha">>, <<"111111111111111111111111111111111111">>}] - ++ type(Type) ++ roles(Roles)}). 
+ couch_doc:from_json_obj({ + [ + {<<"_id">>, <<"org.couchdb.user:foo">>}, + {<<"_rev">>, <<"1-281c81adb1bf10927a6160f246dc0468">>}, + {<<"name">>, <<"foo">>}, + {<<"password_scheme">>, <<"simple">>}, + {<<"salt">>, <<"00000000000000000000000000000000">>}, + {<<"password_sha">>, <<"111111111111111111111111111111111111">>} + ] ++ + type(Type) ++ roles(Roles) + }). roles(custom) -> [{<<"roles">>, [<<"custom">>]}]; roles(missing) -> []. @@ -336,11 +368,12 @@ type(other) -> [{<<"type">>, <<"other">>}]; type(missing) -> []. validate(DiskDoc, NewDoc) -> - JSONCtx = {[ - {<<"db">>, <<"foo/bar">>}, - {<<"name">>, <<"foo">>}, - {<<"roles">>, [<<"_admin">>]} - ]}, + JSONCtx = + {[ + {<<"db">>, <<"foo/bar">>}, + {<<"name">>, <<"foo">>}, + {<<"roles">>, [<<"_admin">>]} + ]}, validate(DiskDoc, NewDoc, JSONCtx). validate(DiskDoc, NewDoc, JSONCtx) -> diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl b/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl index f50be84de..72b780a7f 100644 --- a/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl +++ b/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl @@ -12,7 +12,6 @@ -module(couch_bt_engine_compactor_ev). - -export([ init/0, terminate/0, @@ -24,22 +23,17 @@ event/1 ]). - -define(TAB, couch_db_updater_ev_tab). - init() -> ets:new(?TAB, [set, public, named_table]). - terminate() -> ets:delete(?TAB). - clear() -> ets:delete_all_objects(?TAB). - set_wait(Event) -> Self = self(), WaitFun = fun(_) -> @@ -51,48 +45,48 @@ set_wait(Event) -> end, ContinueFun = fun(Pid) -> Pid ! {Self, go}, - receive {Pid, ok} -> ok end + receive + {Pid, ok} -> ok + end end, ets:insert(?TAB, {Event, WaitFun}), {ok, ContinueFun}. - set_crash(Event) -> Reason = {couch_db_updater_ev_crash, Event}, CrashFun = fun(_) -> exit(Reason) end, ets:insert(?TAB, {Event, CrashFun}), {ok, Reason}. - event(Event) -> - NewEvent = case Event of - seq_init -> - put(?MODULE, 0), - Event; - seq_copy -> - Count = get(?MODULE), - put(?MODULE, Count + 1), - {seq_copy, Count}; - id_init -> - put(?MODULE, 0), - Event; - id_copy -> - Count = get(?MODULE), - put(?MODULE, Count + 1), - {id_copy, Count}; - md_copy_init -> - put(?MODULE, 0), - Event; - md_copy_row -> - Count = get(?MODULE), - put(?MODULE, Count + 1), - {md_copy_row, Count}; - _ -> - Event - end, + NewEvent = + case Event of + seq_init -> + put(?MODULE, 0), + Event; + seq_copy -> + Count = get(?MODULE), + put(?MODULE, Count + 1), + {seq_copy, Count}; + id_init -> + put(?MODULE, 0), + Event; + id_copy -> + Count = get(?MODULE), + put(?MODULE, Count + 1), + {id_copy, Count}; + md_copy_init -> + put(?MODULE, 0), + Event; + md_copy_row -> + Count = get(?MODULE), + put(?MODULE, Count + 1), + {md_copy_row, Count}; + _ -> + Event + end, handle_event(NewEvent). - handle_event(Event) -> try case ets:lookup(?TAB, Event) of @@ -101,6 +95,7 @@ handle_event(Event) -> [] -> ok end - catch error:badarg -> - ok - end.
\ No newline at end of file + catch + error:badarg -> + ok + end. diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl index 090217b4c..007c74d06 100644 --- a/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl +++ b/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl @@ -39,21 +39,28 @@ events() -> [ - init, % The compactor process is spawned - files_opened, % After compaction files have opened + % The compactor process is spawned + init, + % After compaction files have opened + files_opened, - purge_init, % Just before applying purge changes - purge_done, % Just after finishing purge updates + % Just before applying purge changes + purge_init, + % Just after finishing purge updates + purge_done, % The first phase is when we copy all document body and attachment % data to the new database file in order of update sequence so % that we can resume on crash. - seq_init, % Before the first change is copied - {seq_copy, 0}, % After change N is copied + % Before the first change is copied + seq_init, + % After change N is copied + {seq_copy, 0}, {seq_copy, ?INIT_DOCS div 2}, {seq_copy, ?INIT_DOCS - 2}, - seq_done, % After last change is copied + % After last change is copied + seq_done, % The id copy phases come in two flavors. Before a compaction % swap is attempted they're copied from the id_tree in the @@ -61,19 +68,27 @@ events() -> % stored in an emsort file on disk. Thus the two sets of % related events here. - md_sort_init, % Just before metadata sort starts - md_sort_done, % Just after metadata sort finished - md_copy_init, % Just before metadata copy starts - {md_copy_row, 0}, % After docid N is copied + % Just before metadata sort starts + md_sort_init, + % Just after metadata sort finished + md_sort_done, + % Just before metadata copy starts + md_copy_init, + % After docid N is copied + {md_copy_row, 0}, {md_copy_row, ?INIT_DOCS div 2}, {md_copy_row, ?INIT_DOCS - 2}, - md_copy_done, % Just after the last docid is copied + % Just after the last docid is copied + md_copy_done, % And then the final steps before we finish - before_final_sync, % Just before final sync - after_final_sync, % Just after the final sync - before_notify % Just before the final notification + % Just before final sync + before_final_sync, + % Just after the final sync + after_final_sync, + % Just before the final notification + before_notify ]. % Mark which events only happen when documents are present @@ -86,7 +101,6 @@ requires_docs({md_copy_row, _}) -> true; requires_docs(md_copy_done) -> true; requires_docs(_) -> false. - % Mark which events only happen when there's write activity during % a compaction. @@ -97,25 +111,21 @@ requires_write({md_copy_row, _}) -> true; requires_write(md_copy_done) -> true; requires_write(_) -> false. - setup() -> purge_module(), ?EV_MOD:init(), test_util:start_couch(). - teardown(Ctx) -> test_util:stop_couch(Ctx), ?EV_MOD:terminate(). - start_empty_db_test(_Event) -> ?EV_MOD:clear(), DbName = ?tempdb(), {ok, _} = couch_db:create(DbName, [?ADMIN_CTX]), DbName. - start_populated_db_test(Event) -> DbName = start_empty_db_test(Event), {ok, Db} = couch_db:open_int(DbName, []), @@ -126,11 +136,9 @@ start_populated_db_test(Event) -> end, DbName. - stop_test(_Event, DbName) -> couch_server:delete(DbName, [?ADMIN_CTX]). - static_empty_db_test_() -> FiltFun = fun(E) -> not (requires_docs(E) or requires_write(E)) end, Events = lists:filter(FiltFun, events()) -- [init], @@ -153,7 +161,6 @@ static_empty_db_test_() -> } }.
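The event list above drives `couch_bt_engine_compactor_ev` from the previous file: a test registers a closure per event name in a public ets table, and the instrumented compactor fires it by calling `event/1` at the matching step. A stripped-down sketch of that hook mechanism (illustrative only; the names are hypothetical):

```
-module(ev_sketch).
-export([init/0, set_hook/2, event/1]).

-define(TAB, ev_sketch_tab).

init() ->
    ets:new(?TAB, [set, public, named_table]).

%% A test registers a zero-arity closure to run when Event fires.
set_hook(Event, Fun) when is_function(Fun, 0) ->
    ets:insert(?TAB, {Event, Fun}).

%% Called from the instrumented code; a no-op unless a hook was set.
event(Event) ->
    case ets:lookup(?TAB, Event) of
        [{Event, Fun}] -> Fun();
        [] -> ok
    end.
```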
- static_populated_db_test_() -> FiltFun = fun(E) -> not requires_write(E) end, Events = lists:filter(FiltFun, events()) -- [init], @@ -174,7 +181,6 @@ static_populated_db_test_() -> } }. - dynamic_empty_db_test_() -> FiltFun = fun(E) -> not requires_docs(E) end, Events = lists:filter(FiltFun, events()) -- [init], @@ -195,7 +201,6 @@ dynamic_empty_db_test_() -> } }. - dynamic_populated_db_test_() -> Events = events() -- [init], { @@ -215,13 +220,11 @@ dynamic_populated_db_test_() -> } }. - run_static_init(Event, DbName) -> Name = lists:flatten(io_lib:format("~p", [Event])), Test = {timeout, ?TIMEOUT_EUNIT, ?_test(run_static(Event, DbName))}, {Name, Test}. - run_static(Event, DbName) -> {ok, ContinueFun} = ?EV_MOD:set_wait(init), {ok, Reason} = ?EV_MOD:set_crash(Event), @@ -236,13 +239,11 @@ run_static(Event, DbName) -> run_successful_compaction(DbName), couch_db:close(Db). - run_dynamic_init(Event, DbName) -> Name = lists:flatten(io_lib:format("~p", [Event])), Test = {timeout, ?TIMEOUT_EUNIT, ?_test(run_dynamic(Event, DbName))}, {Name, Test}. - run_dynamic(Event, DbName) -> {ok, ContinueFun} = ?EV_MOD:set_wait(init), {ok, Reason} = ?EV_MOD:set_crash(Event), @@ -258,7 +259,6 @@ run_dynamic(Event, DbName) -> run_successful_compaction(DbName), couch_db:close(Db). - run_successful_compaction(DbName) -> ?EV_MOD:clear(), {ok, ContinueFun} = ?EV_MOD:set_wait(init), @@ -274,14 +274,11 @@ run_successful_compaction(DbName) -> validate_compaction(NewDb), couch_db:close(Db). - wait_db_cleared(Db) -> wait_db_cleared(Db, 5). - wait_db_cleared(Db, N) when N < 0 -> erlang:error({db_clear_timeout, couch_db:name(Db)}); - wait_db_cleared(Db, N) -> Tab = couch_server:couch_dbs(couch_db:name(Db)), case ets:lookup(Tab, couch_db:name(Db)) of @@ -290,29 +287,33 @@ wait_db_cleared(Db, N) -> [#entry{db = NewDb}] -> OldPid = couch_db:get_pid(Db), NewPid = couch_db:get_pid(NewDb), - if NewPid /= OldPid -> ok; true -> - timer:sleep(100), - wait_db_cleared(Db, N - 1) + if + NewPid /= OldPid -> + ok; + true -> + timer:sleep(100), + wait_db_cleared(Db, N - 1) end end. - populate_db(_Db, NumDocs) when NumDocs =< 0 -> ok; populate_db(Db, NumDocs) -> String = [$a || _ <- lists:seq(1, erlang:min(NumDocs, 500))], Docs = lists:map( fun(_) -> - couch_doc:from_json_obj({[ - {<<"_id">>, couch_uuids:random()}, - {<<"string">>, list_to_binary(String)} - ]}) + couch_doc:from_json_obj( + {[ + {<<"_id">>, couch_uuids:random()}, + {<<"string">>, list_to_binary(String)} + ]} + ) end, - lists:seq(1, 500)), + lists:seq(1, 500) + ), {ok, _} = couch_db:update_docs(Db, Docs, []), populate_db(Db, NumDocs - 500). - validate_compaction(Db) -> {ok, DocCount} = couch_db:get_doc_count(Db), {ok, DelDocCount} = couch_db:get_del_doc_count(Db), @@ -325,7 +326,6 @@ validate_compaction(Db) -> ?assertEqual(DocCount + DelDocCount, LastCount), ?assertEqual(NumChanges, LastCount). - purge_module() -> case code:which(couch_db_updater) of cover_compiled -> diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl index 4c4c43958..73428b0a9 100644 --- a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl +++ b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl @@ -12,15 +12,12 @@ -module(couch_bt_engine_compactor_tests). - -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). - -define(DELAY, 100). -define(WAIT_DELAY_COUNT, 50). 
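The `?DELAY` and `?WAIT_DELAY_COUNT` defines above feed the same poll-and-retry idiom seen in `wait_db_cleared/2` earlier and `wait_db_compact_done/2` below: re-check a condition, sleep between attempts, and give up after a bounded number of tries. As a standalone sketch (not part of this commit), usable as e.g. `poll_sketch:wait_until(fun() -> whereis(some_proc) =/= undefined end, 100, 50)`:

```
-module(poll_sketch).
-export([wait_until/3]).

%% Re-check Fun() every DelayMs milliseconds, up to Retries attempts.
wait_until(_Fun, _DelayMs, 0) ->
    timeout;
wait_until(Fun, DelayMs, Retries) ->
    case Fun() of
        true ->
            ok;
        false ->
            timer:sleep(DelayMs),
            wait_until(Fun, DelayMs, Retries - 1)
    end.
```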
- setup() -> DbName = ?tempdb(), {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), @@ -28,12 +25,10 @@ setup() -> create_docs(DbName), DbName. - teardown(DbName) when is_binary(DbName) -> couch_server:delete(DbName, [?ADMIN_CTX]), ok. - compaction_resume_test_() -> { setup, @@ -49,7 +44,6 @@ compaction_resume_test_() -> } }. - compaction_resume(DbName) -> ?_test(begin check_db_validity(DbName), @@ -66,14 +60,12 @@ compaction_resume(DbName) -> check_db_validity(DbName) end). - check_db_validity(DbName) -> couch_util:with_db(DbName, fun(Db) -> ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), ?assertEqual(3, couch_db:count_changes_since(Db, 0)) end). - with_mecked_emsort(Fun) -> meck:new(couch_emsort, [passthrough]), meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end), @@ -83,35 +75,35 @@ with_mecked_emsort(Fun) -> meck:unload() end. - create_docs(DbName) -> couch_util:with_db(DbName, fun(Db) -> - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"value">>, 1} - - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>}, - {<<"value">>, 2} - - ]}), - Doc3 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc3">>}, - {<<"value">>, 3} - - ]}), + Doc1 = couch_doc:from_json_obj( + {[ + {<<"_id">>, <<"doc1">>}, + {<<"value">>, 1} + ]} + ), + Doc2 = couch_doc:from_json_obj( + {[ + {<<"_id">>, <<"doc2">>}, + {<<"value">>, 2} + ]} + ), + Doc3 = couch_doc:from_json_obj( + {[ + {<<"_id">>, <<"doc3">>}, + {<<"value">>, 3} + ]} + ), {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]) end). - compact_db(DbName) -> couch_util:with_db(DbName, fun(Db) -> {ok, _} = couch_db:start_compact(Db) end), wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT). - wait_db_compact_done(_DbName, 0) -> Failure = [ {module, ?MODULE}, @@ -123,7 +115,10 @@ wait_db_compact_done(DbName, N) -> IsDone = couch_util:with_db(DbName, fun(Db) -> not is_pid(couch_db:get_compactor_pid(Db)) end), - if IsDone -> ok; true -> - timer:sleep(?DELAY), - wait_db_compact_done(DbName, N - 1) + if + IsDone -> + ok; + true -> + timer:sleep(?DELAY), + wait_db_compact_done(DbName, N - 1) end. diff --git a/src/couch/test/eunit/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl index 3e3ecbf25..56d18d3a4 100644 --- a/src/couch/test/eunit/couch_bt_engine_tests.erl +++ b/src/couch/test/eunit/couch_bt_engine_tests.erl @@ -12,9 +12,7 @@ -module(couch_bt_engine_tests). - -include_lib("eunit/include/eunit.hrl"). - -couch_bt_engine_test_()-> +couch_bt_engine_test_() -> cpse_util:create_tests(couch, couch_bt_engine, "couch"). diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl index a2a972caf..62f128a4f 100644 --- a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl +++ b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl @@ -15,7 +15,8 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). --define(TIMEOUT, 60). % seconds +% seconds +-define(TIMEOUT, 60). 
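`with_mecked_emsort/1` above injects a crash into `couch_emsort:iter/1` so the compaction-resume test can prove an interrupted compaction finishes on the next attempt. The general shape of that fault-injection helper, as a sketch that assumes the meck library is available (module and names here are hypothetical):

```
-module(fault_sketch).
-export([with_broken/3]).

%% Make Mod:Fun/1 crash for the duration of Action(), then restore
%% the real module, mirroring with_mecked_emsort/1 above.
with_broken(Mod, Fun, Action) ->
    meck:new(Mod, [passthrough]),
    meck:expect(Mod, Fun, fun(_) -> erlang:error(kaboom) end),
    try
        Action()
    after
        meck:unload(Mod)
    end.
```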
setup(_) -> Ctx = test_util:start_couch(), @@ -30,23 +31,27 @@ setup(_) -> "db_v7_with_2_purge_req.couch", "db_v7_with_1_purge_req_for_2_docs.couch" ], - NewPaths = lists:map(fun(DbFileName) -> - OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), - NewDbFilePath = filename:join([DbDir, DbFileName]), - ok = filelib:ensure_dir(NewDbFilePath), - file:delete(NewDbFilePath), - {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), - NewDbFilePath - end, DbFileNames), + NewPaths = lists:map( + fun(DbFileName) -> + OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), + NewDbFilePath = filename:join([DbDir, DbFileName]), + ok = filelib:ensure_dir(NewDbFilePath), + file:delete(NewDbFilePath), + {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), + NewDbFilePath + end, + DbFileNames + ), {Ctx, NewPaths}. - teardown(_, {Ctx, Paths}) -> test_util:stop_couch(Ctx), - lists:foreach(fun(Path) -> - file:delete(Path) - end, Paths). - + lists:foreach( + fun(Path) -> + file:delete(Path) + end, + Paths + ). upgrade_test_() -> From = [6, 7], @@ -54,174 +59,182 @@ upgrade_test_() -> "Couch Bt Engine Upgrade tests", { foreachx, - fun setup/1, fun teardown/2, + fun setup/1, + fun teardown/2, [{F, fun t_upgrade_without_purge_req/2} || F <- From] ++ - [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++ - [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++ - [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From] + [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++ + [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++ + [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From] } }. - t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - {timeout, ?TIMEOUT, ?_test(begin - % There are three documents in the fixture - % db with zero purge entries - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_without_purge_req"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(0, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) - end), - ?assertEqual([], UpgradedPurged), - ?assertEqual(8, get_disk_version_from_header(DbName)), - {ok, Rev} = save_doc( - DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} - ), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)), - ?assertEqual(0, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc4">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), - ?assertEqual(1, couch_db:get_purge_seq(Db)) - end) - end)}. 
- + {timeout, ?TIMEOUT, + ?_test(begin + % There are three documents in the fixture + % db with zero purge entries + DbName = ?l2b( + "db_v" ++ integer_to_list(VersionFrom) ++ + "_without_purge_req" + ), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(0, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) + end), + ?assertEqual([], UpgradedPurged), + ?assertEqual(8, get_disk_version_from_header(DbName)), + {ok, Rev} = save_doc( + DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} + ), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)), + ?assertEqual(0, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc4">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), + ?assertEqual(1, couch_db:get_purge_seq(Db)) + end) + end)}. t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - {timeout, ?TIMEOUT, ?_test(begin - % There are two documents in the fixture database - % with a single purge entry - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_with_1_purge_req"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(1, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) - end), - ?assertEqual(8, get_disk_version_from_header(DbName)), - ?assertEqual([{1, <<"doc1">>}], UpgradedPurged), - - {ok, Rev} = save_doc( - DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} - ), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), - ?assertEqual(1, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc4">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(2, couch_db:get_purge_seq(Db)) - end) - end)}. 
- + {timeout, ?TIMEOUT, + ?_test(begin + % There are two documents in the fixture database + % with a single purge entry + DbName = ?l2b( + "db_v" ++ integer_to_list(VersionFrom) ++ + "_with_1_purge_req" + ), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(1, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) + end), + ?assertEqual(8, get_disk_version_from_header(DbName)), + ?assertEqual([{1, <<"doc1">>}], UpgradedPurged), + + {ok, Rev} = save_doc( + DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} + ), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), + ?assertEqual(1, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc4">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), + ?assertEqual(2, couch_db:get_purge_seq(Db)) + end) + end)}. t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - {timeout, ?TIMEOUT, ?_test(begin - % There is one document in the fixture database - % with two docs that have been purged - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_with_2_purge_req"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(2, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) - end), - ?assertEqual(8, get_disk_version_from_header(DbName)), - ?assertEqual([{2, <<"doc2">>}], UpgradedPurged), - - {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(2, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc4">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)), - ?assertEqual(3, couch_db:get_purge_seq(Db)) - end) - end)}. 
- + {timeout, ?TIMEOUT, + ?_test(begin + % There is one document in the fixture database + % with two docs that have been purged + DbName = ?l2b( + "db_v" ++ integer_to_list(VersionFrom) ++ + "_with_2_purge_req" + ), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(2, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) + end), + ?assertEqual(8, get_disk_version_from_header(DbName)), + ?assertEqual([{2, <<"doc2">>}], UpgradedPurged), + + {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), + ?assertEqual(2, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc4">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)), + ?assertEqual(3, couch_db:get_purge_seq(Db)) + end) + end)}. t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) -> - {timeout, ?TIMEOUT, ?_test(begin - % There are two documents (Doc4 and Doc5) in the fixture database - % with three docs (Doc1, Doc2 and Doc3) that have been purged, and - % with one purge req for Doc1 and another purge req for Doc 2 and Doc3 - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_with_1_purge_req_for_2_docs"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(3, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) - end), - ?assertEqual(8, get_disk_version_from_header(DbName)), - ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged), - - {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), - ?assertEqual(3, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc6">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(4, couch_db:get_purge_seq(Db)) - end) - end)}. 
- + {timeout, ?TIMEOUT, + ?_test(begin + % There are two documents (Doc4 and Doc5) in the fixture database + % with three docs (Doc1, Doc2 and Doc3) that have been purged, and + % with one purge req for Doc1 and another purge req for Doc 2 and Doc3 + DbName = ?l2b( + "db_v" ++ integer_to_list(VersionFrom) ++ + "_with_1_purge_req_for_2_docs" + ), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(3, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) + end), + ?assertEqual(8, get_disk_version_from_header(DbName)), + ?assertEqual([{3, <<"doc2">>}, {2, <<"doc3">>}], UpgradedPurged), + + {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), + ?assertEqual(3, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc6">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), + ?assertEqual(4, couch_db:get_purge_seq(Db)) + end) + end)}. save_doc(DbName, Json) -> Doc = couch_doc:from_json_obj(Json), @@ -229,11 +242,9 @@ save_doc(DbName, Json) -> couch_db:update_doc(Db, Doc, []) end). - fold_fun({PSeq, _UUID, Id, _Revs}, Acc) -> {ok, [{PSeq, Id} | Acc]}. - get_disk_version_from_header(DbFileName) -> DbDir = config:get("couchdb", "database_dir"), DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]), diff --git a/src/couch/test/eunit/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl index c9b791d2c..1c9ba7771 100644 --- a/src/couch/test/eunit/couch_btree_tests.erl +++ b/src/couch/test/eunit/couch_btree_tests.erl @@ -16,13 +16,15 @@ -include_lib("couch/include/couch_db.hrl"). -define(ROWS, 1000). --define(TIMEOUT, 60). % seconds - +% seconds +-define(TIMEOUT, 60). setup() -> {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), - {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}, - {reduce, fun reduce_fun/2}]), + {ok, Btree} = couch_btree:open(nil, Fd, [ + {compression, none}, + {reduce, fun reduce_fun/2} + ]), {Fd, Btree}. setup_kvs(_) -> @@ -35,7 +37,10 @@ setup_red() -> "even" -> {"odd", [{{Key, Idx}, 1} | Acc]}; _ -> {"even", [{{Key, Idx}, 1} | Acc]} end - end, {"odd", []}, lists:seq(1, ?ROWS)), + end, + {"odd", []}, + lists:seq(1, ?ROWS) + ), {Fd, Btree} = setup(), {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []), {Fd, Btree1}. @@ -49,7 +54,6 @@ teardown({Fd, _}) -> teardown(_, {Fd, _}) -> teardown(Fd). - kvs_test_funs() -> [ fun should_set_fd_correctly/2, @@ -72,7 +76,6 @@ red_test_funs() -> fun should_reduce_second_half/2 ]. 
- btree_open_test_() -> {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]), @@ -88,10 +91,12 @@ sorted_kvs_test_() -> "BTree with sorted keys", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, { foreachx, - fun setup_kvs/1, fun teardown/2, + fun setup_kvs/1, + fun teardown/2, [{Sorted, Fun} || Fun <- Funs] } } @@ -105,10 +110,12 @@ rsorted_kvs_test_() -> "BTree with backward sorted keys", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, { foreachx, - fun setup_kvs/1, fun teardown/2, + fun setup_kvs/1, + fun teardown/2, [{Reversed, Fun} || Fun <- Funs] } } @@ -122,10 +129,12 @@ shuffled_kvs_test_() -> "BTree with shuffled keys", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, { foreachx, - fun setup_kvs/1, fun teardown/2, + fun setup_kvs/1, + fun teardown/2, [{Shuffled, Fun} || Fun <- Funs] } } @@ -136,13 +145,15 @@ reductions_test_() -> "BTree reductions", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, [ { "Common tests", { foreach, - fun setup_red/0, fun teardown/1, + fun setup_red/0, + fun teardown/1, [ fun should_reduce_without_specified_direction/1, fun should_reduce_forward/1, @@ -157,7 +168,8 @@ reductions_test_() -> "Forward direction", { foreachx, - fun setup_red/1, fun teardown/2, + fun setup_red/1, + fun teardown/2, [{fwd, F} || F <- red_test_funs()] } }, @@ -165,7 +177,8 @@ reductions_test_() -> "Backward direction", { foreachx, - fun setup_red/1, fun teardown/2, + fun setup_red/1, + fun teardown/2, [{rev, F} || F <- red_test_funs()] } } @@ -175,7 +188,6 @@ reductions_test_() -> } }. - should_set_fd_correctly(_, {Fd, Btree}) -> ?_assertMatch(Fd, Btree#btree.fd). @@ -191,7 +203,7 @@ should_set_reduce_option(_, {_, Btree}) -> ?_assertMatch(ReduceFun, Btree1#btree.reduce). should_fold_over_empty_btree(_, {_, Btree}) -> - {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0), + {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X + 1} end, 0), ?_assertEqual(EmptyRes, 0). should_add_all_keys(KeyValues, {Fd, Btree}) -> @@ -214,8 +226,10 @@ should_have_lesser_size_than_file(Fd, Btree) -> ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))). should_keep_root_pointer_to_kp_node(Fd, Btree) -> - ?_assertMatch({ok, {kp_node, _}}, - couch_file:pread_term(Fd, element(1, Btree#btree.root))). + ?_assertMatch( + {ok, {kp_node, _}}, + couch_file:pread_term(Fd, element(1, Btree#btree.root)) + ). 
should_remove_all_keys(KeyValues, Btree) -> Keys = keys(KeyValues), @@ -234,7 +248,10 @@ should_continuously_add_new_kv(KeyValues, {_, Btree}) -> {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), ?assert(couch_btree:size(BtAcc2) > PrevSize), {BtAcc2, couch_btree:size(BtAcc2)} - end, {Btree, couch_btree:size(Btree)}, KeyValues), + end, + {Btree, couch_btree:size(Btree)}, + KeyValues + ), { "Should continuously add key-values to btree", [ @@ -250,7 +267,10 @@ should_continuously_remove_keys(KeyValues, {_, Btree}) -> {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]), ?assert(couch_btree:size(BtAcc2) < PrevSize), {BtAcc2, couch_btree:size(BtAcc2)} - end, {Btree1, couch_btree:size(Btree1)}, KeyValues), + end, + {Btree1, couch_btree:size(Btree1)}, + KeyValues + ), { "Should continuously remove keys from btree", [ @@ -266,48 +286,57 @@ should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) -> {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), ?assert(couch_btree:size(BtAcc2) > PrevSize), {BtAcc2, couch_btree:size(BtAcc2)} - end, {Btree, couch_btree:size(Btree)}, KeyValuesRev), + end, + {Btree, couch_btree:size(Btree)}, + KeyValuesRev + ), should_produce_valid_btree(Btree1, KeyValues). should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) -> {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), - {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) -> - case Count rem 2 == 0 of - true -> {Count + 1, [X | Left], Right}; - false -> {Count + 1, Left, [X | Right]} - end - end, {0, [], []}, KeyValues), - {timeout, ?TIMEOUT, - ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1)) - }. + {_, Rem2Keys0, Rem2Keys1} = lists:foldl( + fun(X, {Count, Left, Right}) -> + case Count rem 2 == 0 of + true -> {Count + 1, [X | Left], Right}; + false -> {Count + 1, Left, [X | Right]} + end + end, + {0, [], []}, + KeyValues + ), + {timeout, ?TIMEOUT, ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))}. should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) -> {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), - {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) -> - case Count rem 2 == 0 of - true -> {Count + 1, [X | Left], Right}; - false -> {Count + 1, Left, [X | Right]} - end - end, {0, [], []}, KeyValues), - {timeout, ?TIMEOUT, - ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0)) - }. - + {_, Rem2Keys0, Rem2Keys1} = lists:foldl( + fun(X, {Count, Left, Right}) -> + case Count rem 2 == 0 of + true -> {Count + 1, [X | Left], Right}; + false -> {Count + 1, Left, [X | Right]} + end + end, + {0, [], []}, + KeyValues + ), + {timeout, ?TIMEOUT, ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))}. should_reduce_without_specified_direction({_, Btree}) -> ?_assertMatch( {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]}, - fold_reduce(Btree, [])). + fold_reduce(Btree, []) + ). should_reduce_forward({_, Btree}) -> ?_assertMatch( {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}])). + fold_reduce(Btree, [{dir, fwd}]) + ). should_reduce_backward({_, Btree}) -> ?_assertMatch( {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}])). + fold_reduce(Btree, [{dir, rev}]) + ). 
should_reduce_whole_range(fwd, {_, Btree}) -> {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}}, @@ -315,20 +344,30 @@ should_reduce_whole_range(fwd, {_, Btree}) -> { "include endkey", ?_assertMatch( - {ok, [{{"odd", 1}, ?ROWS div 2}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key, EK}])) + {ok, [ + {{"odd", 1}, ?ROWS div 2}, + {{"even", 2}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, fwd}, + {start_key, SK}, + {end_key, EK} + ]) + ) }, { "exclude endkey", ?_assertMatch( - {ok, [{{"odd", 1}, (?ROWS div 2) - 1}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key_gt, EK}])) + {ok, [ + {{"odd", 1}, (?ROWS div 2) - 1}, + {{"even", 2}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, fwd}, + {start_key, SK}, + {end_key_gt, EK} + ]) + ) } ]; should_reduce_whole_range(rev, {_, Btree}) -> @@ -337,20 +376,30 @@ should_reduce_whole_range(rev, {_, Btree}) -> { "include endkey", ?_assertMatch( - {ok, [{{"even", ?ROWS}, ?ROWS div 2}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key, EK}])) + {ok, [ + {{"even", ?ROWS}, ?ROWS div 2}, + {{"odd", ?ROWS - 1}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, rev}, + {start_key, SK}, + {end_key, EK} + ]) + ) }, { "exclude endkey", ?_assertMatch( - {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key_gt, EK}])) + {ok, [ + {{"even", ?ROWS}, (?ROWS div 2) - 1}, + {{"odd", ?ROWS - 1}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, rev}, + {start_key, SK}, + {end_key_gt, EK} + ]) + ) } ]. @@ -360,19 +409,30 @@ should_reduce_first_half(fwd, {_, Btree}) -> { "include endkey", ?_assertMatch( - {ok, [{{"odd", 1}, ?ROWS div 4}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, {end_key, EK}])) + {ok, [ + {{"odd", 1}, ?ROWS div 4}, + {{"even", 2}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, fwd}, + {start_key, SK}, + {end_key, EK} + ]) + ) }, { "exclude endkey", ?_assertMatch( - {ok, [{{"odd", 1}, (?ROWS div 4) - 1}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key_gt, EK}])) + {ok, [ + {{"odd", 1}, (?ROWS div 4) - 1}, + {{"even", 2}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, fwd}, + {start_key, SK}, + {end_key_gt, EK} + ]) + ) } ]; should_reduce_first_half(rev, {_, Btree}) -> @@ -381,20 +441,30 @@ should_reduce_first_half(rev, {_, Btree}) -> { "include endkey", ?_assertMatch( - {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key, EK}])) + {ok, [ + {{"even", ?ROWS}, (?ROWS div 4) + 1}, + {{"odd", ?ROWS - 1}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, rev}, + {start_key, SK}, + {end_key, EK} + ]) + ) }, { "exclude endkey", ?_assertMatch( - {ok, [{{"even", ?ROWS}, ?ROWS div 4}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key_gt, EK}])) + {ok, [ + {{"even", ?ROWS}, ?ROWS div 4}, + {{"odd", ?ROWS - 1}, ?ROWS div 2} + ]}, + fold_reduce(Btree, [ + {dir, rev}, + {start_key, SK}, + {end_key_gt, EK} + ]) + ) } ]. 
@@ -404,20 +474,30 @@ should_reduce_second_half(fwd, {_, Btree}) -> { "include endkey", ?_assertMatch( - {ok, [{{"odd", 1}, ?ROWS div 2}, - {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key, EK}])) + {ok, [ + {{"odd", 1}, ?ROWS div 2}, + {{"even", ?ROWS div 2}, (?ROWS div 4) + 1} + ]}, + fold_reduce(Btree, [ + {dir, fwd}, + {start_key, SK}, + {end_key, EK} + ]) + ) }, { "exclude endkey", ?_assertMatch( - {ok, [{{"odd", 1}, (?ROWS div 2) - 1}, - {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key_gt, EK}])) + {ok, [ + {{"odd", 1}, (?ROWS div 2) - 1}, + {{"even", ?ROWS div 2}, (?ROWS div 4) + 1} + ]}, + fold_reduce(Btree, [ + {dir, fwd}, + {start_key, SK}, + {end_key_gt, EK} + ]) + ) } ]; should_reduce_second_half(rev, {_, Btree}) -> @@ -426,20 +506,30 @@ should_reduce_second_half(rev, {_, Btree}) -> { "include endkey", ?_assertMatch( - {ok, [{{"even", ?ROWS}, ?ROWS div 2}, - {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key, EK}])) + {ok, [ + {{"even", ?ROWS}, ?ROWS div 2}, + {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1} + ]}, + fold_reduce(Btree, [ + {dir, rev}, + {start_key, SK}, + {end_key, EK} + ]) + ) }, { "exclude endkey", ?_assertMatch( - {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1}, - {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key_gt, EK}])) + {ok, [ + {{"even", ?ROWS}, (?ROWS div 2) - 1}, + {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1} + ]}, + fold_reduce(Btree, [ + {dir, rev}, + {start_key, SK}, + {end_key_gt, EK} + ]) + ) } ]. @@ -459,9 +549,12 @@ fold_reduce(Btree, Opts) -> FoldFun = fun(GroupedKey, Unreduced, Acc) -> {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]} end, - couch_btree:fold_reduce(Btree, FoldFun, [], - [{key_group_fun, GroupFun}] ++ Opts). - + couch_btree:fold_reduce( + Btree, + FoldFun, + [], + [{key_group_fun, GroupFun}] ++ Opts + ). keys(KVs) -> [K || {K, _} <- KVs]. @@ -471,7 +564,6 @@ reduce_fun(reduce, KVs) -> reduce_fun(rereduce, Reds) -> lists:sum(Reds). - shuffle(List) -> randomize(round(math:log(length(List)) + 0.5), List). @@ -481,7 +573,10 @@ randomize(T, List) -> lists:foldl( fun(_E, Acc) -> randomize(Acc) - end, randomize(List), lists:seq(1, (T - 1))). + end, + randomize(List), + lists:seq(1, (T - 1)) + ). randomize(List) -> D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List), @@ -500,18 +595,24 @@ test_add_remove(Btree, OutKeyValues, RemainingKeyValues) -> fun({K, _}, BtAcc) -> {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]), BtAcc2 - end, Btree, OutKeyValues), + end, + Btree, + OutKeyValues + ), true = test_btree(Btree2, RemainingKeyValues), Btree3 = lists:foldl( fun(KV, BtAcc) -> {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), BtAcc2 - end, Btree2, OutKeyValues), + end, + Btree2, + OutKeyValues + ), true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues). 
test_key_access(Btree, List) -> - FoldFun = fun(Element, {[HAcc|TAcc], Count}) -> + FoldFun = fun(Element, {[HAcc | TAcc], Count}) -> case Element == HAcc of true -> {ok, {TAcc, Count + 1}}; _ -> {ok, {TAcc, Count + 1}} @@ -520,8 +621,12 @@ test_key_access(Btree, List) -> Length = length(List), Sorted = lists:sort(List), {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}), - {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun, - {Sorted, 0}, [{dir, rev}]), + {ok, _, {[], Length}} = couch_btree:fold( + Btree, + FoldFun, + {Sorted, 0}, + [{dir, rev}] + ), ok. test_lookup_access(Btree, KeyValues) -> @@ -529,9 +634,15 @@ test_lookup_access(Btree, KeyValues) -> lists:foreach( fun({Key, Value}) -> [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]), - {ok, _, true} = couch_btree:foldl(Btree, FoldFun, - {Key, Value}, [{start_key, Key}]) - end, KeyValues). + {ok, _, true} = couch_btree:foldl( + Btree, + FoldFun, + {Key, Value}, + [{start_key, Key}] + ) + end, + KeyValues + ). test_final_reductions(Btree, KeyValues) -> KVLen = length(KeyValues), @@ -545,18 +656,28 @@ test_final_reductions(Btree, KeyValues) -> CountToEnd = couch_btree:final_reduce(Btree, LeadingReds), {ok, Acc + 1} end, - {LStartKey, _} = case KVLen of - 0 -> {nil, nil}; - _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues)) - end, - {RStartKey, _} = case KVLen of - 0 -> {nil, nil}; - _ -> lists:nth(KVLen div 3, lists:sort(KeyValues)) - end, - {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0, - [{start_key, LStartKey}]), - {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0, - [{dir, rev}, {start_key, RStartKey}]), + {LStartKey, _} = + case KVLen of + 0 -> {nil, nil}; + _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues)) + end, + {RStartKey, _} = + case KVLen of + 0 -> {nil, nil}; + _ -> lists:nth(KVLen div 3, lists:sort(KeyValues)) + end, + {ok, _, FoldLRed} = couch_btree:foldl( + Btree, + FoldLFun, + 0, + [{start_key, LStartKey}] + ), + {ok, _, FoldRRed} = couch_btree:fold( + Btree, + FoldRFun, + 0, + [{dir, rev}, {start_key, RStartKey}] + ), KVLen = FoldLRed + FoldRRed, ok. 
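`test_final_reductions/2` above checks a partition invariant: a forward fold starting just after the split key and a reverse fold starting at the split key must together count every key exactly once. The same invariant restated over a plain list (illustrative only, not from this commit):

```
-module(fold_split_sketch).
-export([check/0]).

check() ->
    Sorted = lists:seq(1, 9),
    K = length(Sorted) div 3,
    %% The reverse fold covers keys 1..K, the forward fold K+1..9.
    Rev = length(lists:sublist(Sorted, K)),
    Fwd = length(lists:nthtail(K, Sorted)),
    true = (Fwd + Rev =:= length(Sorted)),
    ok.
```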
diff --git a/src/couch/test/eunit/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl index 848b471f9..02b69f132 100644 --- a/src/couch/test/eunit/couch_changes_tests.erl +++ b/src/couch/test/eunit/couch_changes_tests.erl @@ -28,39 +28,49 @@ setup() -> DbName = ?tempdb(), {ok, Db} = create_db(DbName), - Revs = [R || {ok, R} <- [ - save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}) - ]], + Revs = [ + R + || {ok, R} <- [ + save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}) + ] + ], Rev = lists:nth(3, Revs), {ok, Db1} = couch_db:reopen(Db), {ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}), Revs1 = Revs ++ [Rev1], - Revs2 = Revs1 ++ [R || {ok, R} <- [ - save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}), - save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}), - save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}), - save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]}) - ]], - config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false), + Revs2 = + Revs1 ++ + [ + R + || {ok, R} <- [ + save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}), + save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}), + save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}), + save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]}) + ] + ], + config:set( + "native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist = false + ), {DbName, list_to_tuple(Revs2)}. teardown({DbName, _}) -> - config:delete("native_query_servers", "erlang", _Persist=false), + config:delete("native_query_servers", "erlang", _Persist = false), delete_db(DbName), ok. - changes_test_() -> { "Changes feed", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, [ filter_by_selector(), filter_by_doc_id(), @@ -78,7 +88,8 @@ filter_by_doc_id() -> "Filter _doc_id", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_filter_by_specific_doc_ids/1, fun should_filter_by_specific_doc_ids_descending/1, @@ -94,7 +105,8 @@ filter_by_selector() -> "Filter _selector", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_select_basic/1, fun should_select_with_since/1, @@ -108,13 +120,13 @@ filter_by_selector() -> } }. - filter_by_design() -> { "Filter _design", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_emit_only_design_documents/1 ] @@ -138,7 +150,8 @@ filter_by_filter_function() -> "Filter by filters", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_filter_by_doc_attribute/1, fun should_filter_by_user_ctx/1 @@ -151,7 +164,8 @@ filter_by_view() -> "Filter _view", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_filter_by_view/1, fun should_filter_by_erlang_view/1 @@ -164,7 +178,8 @@ continuous_feed() -> "Continuous Feed", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_filter_continuous_feed_by_specific_doc_ids/1, fun should_end_changes_when_db_deleted/1 @@ -172,7 +187,6 @@ continuous_feed() -> } }. 
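The test generators above all use eunit's `foreach` fixture: `setup/0` runs before, and `teardown/1` after, every test in the list. A self-contained toy example of the same shape (not from this commit):

```
-module(fixture_sketch_tests).
-include_lib("eunit/include/eunit.hrl").

setup() ->
    ets:new(sketch_tab, [named_table, public]).

teardown(_Tab) ->
    ets:delete(sketch_tab).

%% Each instantiator gets the setup result and returns a lazy test.
fixture_test_() ->
    {foreach, fun setup/0, fun teardown/1, [
        fun(_) -> ?_assertEqual([], ets:tab2list(sketch_tab)) end,
        fun(_) -> ?_assert(is_integer(ets:info(sketch_tab, size))) end
    ]}.
```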
- should_filter_by_specific_doc_ids({DbName, _}) -> ?_test( begin @@ -190,7 +204,8 @@ should_filter_by_specific_doc_ids({DbName, _}) -> ?assertEqual(<<"doc3">>, Id2), ?assertEqual(6, Seq2), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_filter_by_specific_doc_ids_descending({DbName, _}) -> ?_test( @@ -210,7 +225,8 @@ should_filter_by_specific_doc_ids_descending({DbName, _}) -> ?assertEqual(<<"doc4">>, Id2), ?assertEqual(4, Seq2), ?assertEqual(4, LastSeq) - end). + end + ). should_filter_by_specific_doc_ids_with_since({DbName, _}) -> ?_test( @@ -228,7 +244,8 @@ should_filter_by_specific_doc_ids_with_since({DbName, _}) -> ?assertEqual(<<"doc3">>, Id1), ?assertEqual(6, Seq1), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_filter_by_specific_doc_ids_no_result({DbName, _}) -> ?_test( @@ -243,7 +260,8 @@ should_filter_by_specific_doc_ids_no_result({DbName, _}) -> ?assertEqual(0, length(Rows)), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_handle_deleted_docs({DbName, Revs}) -> ?_test( @@ -252,9 +270,12 @@ should_handle_deleted_docs({DbName, Revs}) -> {ok, Db} = couch_db:open_int(DbName, []), {ok, _} = save_doc( Db, - {[{<<"_id">>, <<"doc3">>}, - {<<"_deleted">>, true}, - {<<"_rev">>, Rev3_2}]}), + {[ + {<<"_id">>, <<"doc3">>}, + {<<"_deleted">>, true}, + {<<"_rev">>, Rev3_2} + ]} + ), ChArgs = #changes_args{ filter = "_doc_ids", @@ -270,7 +291,8 @@ should_handle_deleted_docs({DbName, Revs}) -> Rows ), ?assertEqual(11, LastSeq) - end). + end + ). should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) -> ?_test( @@ -305,14 +327,29 @@ should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) -> Rev4 = element(4, Revs), Rev3_2 = element(6, Revs), - {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, - {<<"_rev">>, Rev4}]}), + {ok, Rev4_2} = save_doc( + Db, + {[ + {<<"_id">>, <<"doc4">>}, + {<<"_rev">>, Rev4} + ]} + ), {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, - {<<"_rev">>, Rev4_2}]}), + {ok, _} = save_doc( + Db, + {[ + {<<"_id">>, <<"doc4">>}, + {<<"_rev">>, Rev4_2} + ]} + ), {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}), - {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, - {<<"_rev">>, Rev3_2}]}), + {ok, Rev3_3} = save_doc( + Db, + {[ + {<<"_id">>, <<"doc3">>}, + {<<"_rev">>, Rev3_2} + ]} + ), reset_row_notifications(), ok = unpause(Consumer), ?assertEqual(ok, wait_row_notifications(2)), @@ -327,8 +364,13 @@ should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) -> ?assertEqual(17, Row16#row.seq), clear_rows(Consumer), - {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, - {<<"_rev">>, Rev3_3}]}), + {ok, _Rev3_4} = save_doc( + Db, + {[ + {<<"_id">>, <<"doc3">>}, + {<<"_rev">>, Rev3_3} + ]} + ), reset_row_notifications(), ok = unpause(Consumer), ?assertEqual(ok, wait_row_notifications(1)), @@ -340,8 +382,8 @@ should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) -> stop_consumer(Consumer), ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows) - end). - + end + ). should_end_changes_when_db_deleted({DbName, _Revs}) -> ?_test(begin @@ -361,7 +403,6 @@ should_end_changes_when_db_deleted({DbName, _Revs}) -> ok end). - should_select_basic({DbName, _}) -> ?_test( begin @@ -374,7 +415,8 @@ should_select_basic({DbName, _}) -> ?assertEqual(<<"doc3">>, Id), ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq) - end). + end + ). 
should_select_with_since({DbName, _}) -> ?_test( @@ -389,7 +431,8 @@ should_select_with_since({DbName, _}) -> ?assertEqual(<<"doc8">>, Id), ?assertEqual(10, Seq), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_select_when_no_result({DbName, _}) -> ?_test( @@ -400,7 +443,8 @@ should_select_when_no_result({DbName, _}) -> {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), ?assertEqual(0, length(Rows)), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_select_with_deleted_docs({DbName, Revs}) -> ?_test( @@ -409,9 +453,12 @@ should_select_with_deleted_docs({DbName, Revs}) -> {ok, Db} = couch_db:open_int(DbName, []), {ok, _} = save_doc( Db, - {[{<<"_id">>, <<"doc3">>}, - {<<"_deleted">>, true}, - {<<"_rev">>, Rev3_2}]}), + {[ + {<<"_id">>, <<"doc3">>}, + {<<"_deleted">>, true}, + {<<"_rev">>, Rev3_2} + ]} + ), ChArgs = #changes_args{filter = "_selector"}, Selector = {[{<<"_id">>, <<"doc3">>}]}, Req = {json_req, {[{<<"selector">>, Selector}]}}, @@ -421,7 +468,8 @@ should_select_with_deleted_docs({DbName, Revs}) -> Rows ), ?assertEqual(11, LastSeq) - end). + end + ). should_select_with_continuous({DbName, Revs}) -> ?_test( @@ -437,8 +485,8 @@ should_select_with_continuous({DbName, Revs}) -> ok = pause(Consumer), Rows = get_rows(Consumer), ?assertMatch( - [#row{seq = 10, id = <<"doc8">>, deleted = false}], - Rows + [#row{seq = 10, id = <<"doc8">>, deleted = false}], + Rows ), clear_rows(Consumer), {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}), @@ -448,45 +496,60 @@ should_select_with_continuous({DbName, Revs}) -> ?assertEqual([], get_rows(Consumer)), Rev4 = element(4, Revs), Rev8 = element(10, Revs), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}, - {<<"_rev">>, Rev8}]}), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, - {<<"_rev">>, Rev4}]}), + {ok, _} = save_doc( + Db, + {[ + {<<"_id">>, <<"doc8">>}, + {<<"_rev">>, Rev8} + ]} + ), + {ok, _} = save_doc( + Db, + {[ + {<<"_id">>, <<"doc4">>}, + {<<"_rev">>, Rev4} + ]} + ), reset_row_notifications(), ok = unpause(Consumer), ?assertEqual(ok, wait_row_notifications(1)), ok = pause(Consumer), NewRows = get_rows(Consumer), ?assertMatch( - [#row{seq = _, id = <<"doc8">>, deleted = false}], - NewRows + [#row{seq = _, id = <<"doc8">>, deleted = false}], + NewRows ) - end). + end + ). should_stop_selector_when_db_deleted({DbName, _Revs}) -> ?_test( - begin - {ok, _Db} = couch_db:open_int(DbName, []), - ChArgs = #changes_args{filter = "_selector", feed = "continuous"}, - Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - Consumer = spawn_consumer(DbName, ChArgs, Req), - ok = pause(Consumer), - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok = unpause(Consumer), - {_Rows, _LastSeq} = wait_finished(Consumer), - stop_consumer(Consumer), - ok - end). - + begin + {ok, _Db} = couch_db:open_int(DbName, []), + ChArgs = #changes_args{filter = "_selector", feed = "continuous"}, + Selector = {[{<<"_id">>, <<"doc3">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + Consumer = spawn_consumer(DbName, ChArgs, Req), + ok = pause(Consumer), + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok = unpause(Consumer), + {_Rows, _LastSeq} = wait_finished(Consumer), + stop_consumer(Consumer), + ok + end + ). 
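The continuous-feed tests above steer the consumer process with paired messages tagged by a fresh reference, so a reply can never be confused with stale mailbox traffic. A self-contained sketch of that request/reply shape (module and function names are illustrative, not the test suite's API):

```
-module(refcall_demo).
-export([start/0, call/2]).

start() ->
    spawn(fun loop/0).

loop() ->
    receive
        {pause, Ref, From} ->
            From ! {paused, Ref},
            %% Stay blocked until the matching continue arrives.
            receive
                {continue, Ref2, From2} ->
                    From2 ! {ok, Ref2}
            end,
            loop();
        {stop, Ref, From} ->
            From ! {ok, Ref}
    end.

call(Pid, Tag) ->
    %% A fresh reference guarantees the reply we receive belongs to
    %% this request, even if older messages are still queued.
    Ref = make_ref(),
    Pid ! {Tag, Ref, self()},
    receive
        {_, Ref} = Reply -> Reply
    after 5000 -> timeout
    end.
```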
should_select_with_empty_fields({DbName, _}) -> ?_test( begin - ChArgs = #changes_args{filter = "_selector", include_docs=true}, + ChArgs = #changes_args{filter = "_selector", include_docs = true}, Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}, - {<<"fields">>, []}]}}, + Req = + {json_req, + {[ + {<<"selector">>, Selector}, + {<<"fields">>, []} + ]}}, {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), ?assertEqual(1, length(Rows)), [#row{seq = Seq, id = Id, doc = Doc}] = Rows, @@ -494,15 +557,20 @@ should_select_with_empty_fields({DbName, _}) -> ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq), ?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc) - end). + end + ). should_select_with_fields({DbName, _}) -> ?_test( begin - ChArgs = #changes_args{filter = "_selector", include_docs=true}, + ChArgs = #changes_args{filter = "_selector", include_docs = true}, Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}, - {<<"fields">>, [<<"_id">>, <<"nope">>]}]}}, + Req = + {json_req, + {[ + {<<"selector">>, Selector}, + {<<"fields">>, [<<"_id">>, <<"nope">>]} + ]}}, {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), ?assertEqual(1, length(Rows)), [#row{seq = Seq, id = Id, doc = Doc}] = Rows, @@ -510,8 +578,8 @@ should_select_with_fields({DbName, _}) -> ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq), ?assertMatch(Doc, {[{<<"_id">>, <<"doc3">>}]}) - end). - + end + ). should_emit_only_design_documents({DbName, Revs}) -> ?_test( @@ -526,11 +594,15 @@ should_emit_only_design_documents({DbName, Revs}) -> ?assertEqual(UpSeq, LastSeq), ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows), - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>}, - {<<"_rev">>, element(8, Revs)}, - {<<"_deleted">>, true}]}), + {ok, _} = save_doc( + Db, + {[ + {<<"_id">>, <<"_design/foo">>}, + {<<"_rev">>, element(8, Revs)}, + {<<"_deleted">>, true} + ]} + ), couch_db:close(Db), {Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req), @@ -539,11 +611,18 @@ should_emit_only_design_documents({DbName, Revs}) -> ?assertEqual(1, length(Rows2)), ?assertEqual(UpSeq2, LastSeq2), - ?assertEqual([#row{seq = 11, - id = <<"_design/foo">>, - deleted = true}], - Rows2) - end). + ?assertEqual( + [ + #row{ + seq = 11, + id = <<"_design/foo">>, + deleted = true + } + ], + Rows2 + ) + end + ). %% should_receive_heartbeats(_) -> %% {timeout, ?TEST_TIMEOUT div 1000, @@ -616,16 +695,21 @@ should_filter_by_doc_attribute({DbName, _}) -> ?_test( begin DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"filters">>, {[ - {<<"valid">>, <<"function(doc, req) {" - " if (doc._id == 'doc3') {" - " return true; " - "} }">>} - ]}} - ]}), + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"filters">>, + {[ + {<<"valid">>, << + "function(doc, req) {" + " if (doc._id == 'doc3') {" + " return true; " + "} }" + >>} + ]}} + ]} + ), ChArgs = #changes_args{filter = "app/valid"}, Req = {json_req, null}, ok = update_ddoc(DbName, DDoc), @@ -635,28 +719,38 @@ should_filter_by_doc_attribute({DbName, _}) -> ?assertEqual(<<"doc3">>, Id), ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq) - end). + end + ). 
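The `{json_req, {[...]}}` terms above use CouchDB's EJson encoding: a JSON object is a one-element tuple wrapping a proplist, arrays are plain lists, and strings are binaries. A sketch of building such a request by hand (`req/0` is an illustrative name):

```
-module(ejson_demo).
-export([req/0]).

%% Corresponds to the JSON:
%%   {"selector": {"_id": "doc3"}, "fields": ["_id"]}
req() ->
    Selector = {[{<<"_id">>, <<"doc3">>}]},
    {json_req,
        {[
            {<<"selector">>, Selector},
            {<<"fields">>, [<<"_id">>]}
        ]}}.
```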
should_filter_by_user_ctx({DbName, _}) -> ?_test( begin DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"filters">>, {[ - {<<"valid">>, <<"function(doc, req) {" - " if (req.userCtx.name == doc._id) {" - " return true; " - "} }">>} - ]}} - ]}), + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"filters">>, + {[ + {<<"valid">>, << + "function(doc, req) {" + " if (req.userCtx.name == doc._id) {" + " return true; " + "} }" + >>} + ]}} + ]} + ), ChArgs = #changes_args{filter = "app/valid"}, UserCtx = #user_ctx{name = <<"doc3">>, roles = []}, {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx), - Req = {json_req, {[{ - <<"userCtx">>, couch_util:json_user_ctx(DbRec) - }]}}, + Req = + {json_req, + {[ + { + <<"userCtx">>, couch_util:json_user_ctx(DbRec) + } + ]}}, ok = update_ddoc(DbName, DDoc), {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), ?assertEqual(1, length(Rows)), @@ -664,30 +758,42 @@ should_filter_by_user_ctx({DbName, _}) -> ?assertEqual(<<"doc3">>, Id), ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_filter_by_view({DbName, _}) -> ?_test( begin DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"valid">>, {[ - {<<"map">>, <<"function(doc) {" - " if (doc._id == 'doc3') {" - " emit(doc); " - "} }">>} - ]}} - ]}} - ]}), - ChArgs = #changes_args{filter = "_view"}, - Req = {json_req, {[{ - <<"query">>, {[ - {<<"view">>, <<"app/valid">>} + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, + {[ + {<<"valid">>, + {[ + {<<"map">>, << + "function(doc) {" + " if (doc._id == 'doc3') {" + " emit(doc); " + "} }" + >>} + ]}} + ]}} ]} - }]}}, + ), + ChArgs = #changes_args{filter = "_view"}, + Req = + {json_req, + {[ + { + <<"query">>, + {[ + {<<"view">>, <<"app/valid">>} + ]} + } + ]}}, ok = update_ddoc(DbName, DDoc), {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), ?assertEqual(1, length(Rows)), @@ -695,32 +801,44 @@ should_filter_by_view({DbName, _}) -> ?assertEqual(<<"doc3">>, Id), ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq) - end). + end + ). should_filter_by_erlang_view({DbName, _}) -> ?_test( begin DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"erlang">>}, - {<<"views">>, {[ - {<<"valid">>, {[ - {<<"map">>, <<"fun({Doc}) ->" - " case lists:keyfind(<<\"_id\">>, 1, Doc) of" - " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); " - " false -> ok" - " end " - "end.">>} - ]}} - ]}} - ]}), - ChArgs = #changes_args{filter = "_view"}, - Req = {json_req, {[{ - <<"query">>, {[ - {<<"view">>, <<"app/valid">>} + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"erlang">>}, + {<<"views">>, + {[ + {<<"valid">>, + {[ + {<<"map">>, << + "fun({Doc}) ->" + " case lists:keyfind(<<\"_id\">>, 1, Doc) of" + " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); " + " false -> ok" + " end " + "end." 
+ >>} + ]}} + ]}} ]} - }]}}, + ), + ChArgs = #changes_args{filter = "_view"}, + Req = + {json_req, + {[ + { + <<"query">>, + {[ + {<<"view">>, <<"app/valid">>} + ]} + } + ]}}, ok = update_ddoc(DbName, DDoc), {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), ?assertEqual(1, length(Rows)), @@ -728,7 +846,8 @@ should_filter_by_erlang_view({DbName, _}) -> ?assertEqual(<<"doc3">>, Id), ?assertEqual(6, Seq), ?assertEqual(UpSeq, LastSeq) - end). + end + ). update_ddoc(DbName, DDoc) -> {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), @@ -752,12 +871,13 @@ save_doc(Db, Json) -> get_rows({Consumer, _}) -> Ref = make_ref(), Consumer ! {get_rows, Ref}, - Resp = receive - {rows, Ref, Rows} -> - Rows - after ?TIMEOUT -> - timeout - end, + Resp = + receive + {rows, Ref, Rows} -> + Rows + after ?TIMEOUT -> + timeout + end, ?assertNotEqual(timeout, Resp), Resp. @@ -776,48 +896,52 @@ get_rows({Consumer, _}) -> clear_rows({Consumer, _}) -> Ref = make_ref(), Consumer ! {reset, Ref}, - Resp = receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, + Resp = + receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, ?assertNotEqual(timeout, Resp), Resp. stop_consumer({Consumer, _}) -> Ref = make_ref(), Consumer ! {stop, Ref}, - Resp = receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, + Resp = + receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, ?assertNotEqual(timeout, Resp), Resp. pause({Consumer, _}) -> Ref = make_ref(), Consumer ! {pause, Ref}, - Resp = receive - {paused, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, + Resp = + receive + {paused, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, ?assertNotEqual(timeout, Resp), Resp. unpause({Consumer, _}) -> Ref = make_ref(), Consumer ! {continue, Ref}, - Resp = receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, + Resp = + receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, ?assertNotEqual(timeout, Resp), Resp. @@ -828,20 +952,23 @@ wait_finished({_, ConsumerRef}) -> {'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok -> ok; {'DOWN', ConsumerRef, _, _, Msg} -> - erlang:error({consumer_died, [ + erlang:error( + {consumer_died, [ + {module, ?MODULE}, + {line, ?LINE}, + {value, Msg} + ]} + ) + after ?TIMEOUT -> + erlang:error( + {consumer_died, [ {module, ?MODULE}, {line, ?LINE}, - {value, Msg} - ]}) - after ?TIMEOUT -> - erlang:error({consumer_died, [ - {module, ?MODULE}, - {line, ?LINE}, - {value, timeout} - ]}) + {value, timeout} + ]} + ) end. - reset_row_notifications() -> receive row -> @@ -850,7 +977,6 @@ reset_row_notifications() -> ok end. - wait_row_notifications(N) -> receive row when N == 1 -> @@ -861,7 +987,6 @@ wait_row_notifications(N) -> timeout end. 
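wait_finished/1 above leans on spawn_monitor/1: the 'DOWN' message carries the exit reason, letting a test distinguish normal completion from a crash without trapping exits. A free-standing sketch of that wait (`run/1` and the timeout are illustrative):

```
-module(monitor_demo).
-export([run/1]).

run(Fun) when is_function(Fun, 0) ->
    {_Pid, MRef} = spawn_monitor(Fun),
    receive
        {'DOWN', MRef, process, _, normal} ->
            ok;
        {'DOWN', MRef, process, _, Reason} ->
            %% Any non-normal exit surfaces as an error to the caller.
            {error, Reason}
    after 5000 ->
        timeout
    end.
```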
- spawn_consumer(DbName, ChangesArgs0, Req) -> Parent = self(), spawn_monitor(fun() -> @@ -884,13 +1009,16 @@ spawn_consumer(DbName, ChangesArgs0, Req) -> maybe_pause(Parent, Acc) end, {ok, Db} = couch_db:open_int(DbName, []), - ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined) - andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of - true -> - ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100}; - false -> - ChangesArgs0 - end, + ChangesArgs = + case + (ChangesArgs0#changes_args.timeout =:= undefined) andalso + (ChangesArgs0#changes_args.heartbeat =:= undefined) + of + true -> + ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100}; + false -> + ChangesArgs0 + end, FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db), try FeedFun({Callback, []}) @@ -920,11 +1048,14 @@ maybe_pause(Parent, Acc) -> Parent ! {ok, Ref}, throw({stop, Acc}); V when V /= updated -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {value, V}, - {reason, "Received unexpected message"}]}) + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, + {line, ?LINE}, + {value, V}, + {reason, "Received unexpected message"} + ]} + ) after 0 -> Acc end. diff --git a/src/couch/test/eunit/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl index 916b63207..dc1ac79e6 100644 --- a/src/couch/test/eunit/couch_db_doc_tests.erl +++ b/src/couch/test/eunit/couch_db_doc_tests.erl @@ -18,7 +18,6 @@ start() -> test_util:start_couch([ioq]). - setup() -> DbName = ?tempdb(), config:set("couchdb", "stem_interactive_updates", "false", false), @@ -26,21 +25,21 @@ setup() -> couch_db:close(Db), DbName. - teardown(DbName) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]), ok. - couch_db_doc_test_() -> { "CouchDB doc tests", { setup, - fun start/0, fun test_util:stop_couch/1, + fun start/0, + fun test_util:stop_couch/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_truncate_number_of_revisions/1, fun should_raise_bad_request_on_invalid_rev/1, @@ -50,7 +49,6 @@ couch_db_doc_test_() -> } }. - should_truncate_number_of_revisions(DbName) -> DocId = <<"foo">>, Db = open_db(DbName), @@ -60,7 +58,6 @@ should_truncate_number_of_revisions(DbName) -> {ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10), ?_assertEqual(5, length(Revs)). - should_raise_bad_request_on_invalid_rev(DbName) -> DocId = <<"foo">>, InvalidRev1 = <<"foo">>, @@ -70,18 +67,15 @@ should_raise_bad_request_on_invalid_rev(DbName) -> Db = open_db(DbName), create_doc(Db, DocId), [ - {InvalidRev1, - ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))}, - {InvalidRev2, - ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))}, - {InvalidRev3, - ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))} + {InvalidRev1, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))}, + {InvalidRev2, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))}, + {InvalidRev3, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))} ]. 
should_allow_access_in_doc_keys_test(_DbName) -> Json = <<"{\"_id\":\"foo\",\"_access\":[\"test\"]}">>, EJson = couch_util:json_decode(Json), - Expected = {[{<<"_id">>,<<"foo">>}, {<<"_access">>, [<<"test">>]}]}, + Expected = {[{<<"_id">>, <<"foo">>}, {<<"_access">>, [<<"test">>]}]}, EJson = Expected, Doc = couch_doc:from_json_obj(EJson), NewEJson = couch_doc:to_json_obj(Doc, []), @@ -91,31 +85,33 @@ open_db(DbName) -> {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), Db. - create_doc(Db, DocId) -> add_revision(Db, DocId, undefined). - open_doc_rev(Db0, DocId, Rev) -> {ok, Db} = couch_db:reopen(Db0), couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []). - add_revision(Db, DocId, undefined) -> add_revision(Db, DocId, []); add_revision(Db, DocId, Rev) when is_binary(Rev) -> add_revision(Db, DocId, [{<<"_rev">>, Rev}]); add_revision(Db0, DocId, Rev) -> {ok, Db} = couch_db:reopen(Db0), - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, DocId}, - {<<"value">>, DocId} - ] ++ Rev}), + Doc = couch_doc:from_json_obj({ + [ + {<<"_id">>, DocId}, + {<<"value">>, DocId} + ] ++ Rev + }), {ok, NewRev} = couch_db:update_doc(Db, Doc, []), couch_doc:rev_to_str(NewRev). - add_revisions(Db, DocId, Rev, N) -> - lists:foldl(fun(_, OldRev) -> - add_revision(Db, DocId, OldRev) - end, Rev, lists:seq(1, N)). + lists:foldl( + fun(_, OldRev) -> + add_revision(Db, DocId, OldRev) + end, + Rev, + lists:seq(1, N) + ). diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl index bb97c66d7..3a9577a0d 100644 --- a/src/couch/test/eunit/couch_db_mpr_tests.erl +++ b/src/couch/test/eunit/couch_db_mpr_tests.erl @@ -12,7 +12,6 @@ -module(couch_db_mpr_tests). - -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). @@ -24,41 +23,35 @@ -define(CONTENT_JSON, {"Content-Type", "application/json"}). -define(JSON_BODY, "{\"foo\": \"bar\"}"). -define(CONTENT_MULTI_RELATED, - {"Content-Type", "multipart/related;boundary=\"bound\""}). - + {"Content-Type", "multipart/related;boundary=\"bound\""} +). setup() -> Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false), TmpDb = ?tempdb(), Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(couch_httpd, port), Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), Url. - teardown(Url) -> catch delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false). - + ok = config:delete("admins", ?USER, _Persist = false). create_db(Url) -> {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), ?assert(Status =:= 201 orelse Status =:= 202). - delete_db(Url) -> {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - create_doc(Url, Id, Body, Type) -> test_request:put(Url ++ "/" ++ Id, [Type, ?AUTH], Body). - delete_doc(Url, Id, Rev) -> test_request:delete(Url ++ "/" ++ Id ++ "?rev=" ++ ?b2l(Rev)). - couch_db_mpr_test_() -> { "multi-part attachment tests", @@ -77,30 +70,29 @@ couch_db_mpr_test_() -> } }. 
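add_revisions/4 in the couch_db_doc_tests hunks above threads the newest revision through N sequential updates with lists:foldl/3, the accumulator being just the latest rev. The same shape, reduced to a counter (`chain/2` is illustrative):

```
-module(foldl_demo).
-export([chain/2]).

%% chain(0, 5) =:= 5: each step consumes the previous step's result.
chain(Start, N) ->
    lists:foldl(
        fun(_I, OldRev) ->
            OldRev + 1
        end,
        Start,
        lists:seq(1, N)
    ).
```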
- recreate_with_mpr(Url) -> - {timeout, ?TIMEOUT, ?_test(begin - DocId1 = "foo", - DocId2 = "bar", - - create_db(Url), - create_and_delete_doc(Url, DocId1), - Rev1 = create_with_mpr(Url, DocId1), - delete_db(Url), - - create_db(Url), - create_and_delete_doc(Url, DocId1), - % We create a second unrelated doc to change the - % position on disk where the attachment is written - % so that we can assert that the position on disk - % is not included when calculating a revision. - create_and_delete_doc(Url, DocId2), - Rev2 = create_with_mpr(Url, DocId1), - delete_db(Url), - - ?assertEqual(Rev1, Rev2) - end)}. - + {timeout, ?TIMEOUT, + ?_test(begin + DocId1 = "foo", + DocId2 = "bar", + + create_db(Url), + create_and_delete_doc(Url, DocId1), + Rev1 = create_with_mpr(Url, DocId1), + delete_db(Url), + + create_db(Url), + create_and_delete_doc(Url, DocId1), + % We create a second unrelated doc to change the + % position on disk where the attachment is written + % so that we can assert that the position on disk + % is not included when calculating a revision. + create_and_delete_doc(Url, DocId2), + Rev2 = create_with_mpr(Url, DocId1), + delete_db(Url), + + ?assertEqual(Rev1, Rev2) + end)}. create_and_delete_doc(Url, DocId) -> {ok, _, _, Resp} = create_doc(Url, DocId, ?JSON_BODY, ?CONTENT_JSON), @@ -109,7 +101,6 @@ create_and_delete_doc(Url, DocId) -> ?assert(is_binary(Rev)), {ok, _, _, _} = delete_doc(Url, DocId, Rev). - create_with_mpr(Url, DocId) -> {ok, _, _, Resp} = create_doc(Url, DocId, mpr(), ?CONTENT_MULTI_RELATED), {Props} = ?JSON_DECODE(Resp), @@ -117,19 +108,18 @@ create_with_mpr(Url, DocId) -> ?assert(is_binary(Rev)), Rev. - mpr() -> lists:concat([ "--bound\r\n", "Content-Type: application/json\r\n\r\n", "{", - "\"body\":\"stuff\"," - "\"_attachments\":", - "{\"foo.txt\":{", - "\"follows\":true,", - "\"content_type\":\"text/plain\"," - "\"length\":21", - "}}" + "\"body\":\"stuff\"," + "\"_attachments\":", + "{\"foo.txt\":{", + "\"follows\":true,", + "\"content_type\":\"text/plain\"," + "\"length\":21", + "}}" "}", "\r\n--bound\r\n\r\n", "this is 21 chars long", diff --git a/src/couch/test/eunit/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl index 93551adbc..bcfbffb05 100644 --- a/src/couch/test/eunit/couch_db_plugin_tests.erl +++ b/src/couch/test/eunit/couch_db_plugin_tests.erl @@ -21,7 +21,8 @@ on_delete/2 ]). --export([ %% couch_epi_plugin behaviour +%% couch_epi_plugin behaviour +-export([ app/0, providers/0, services/0, @@ -58,12 +59,15 @@ validate_dbname({false, _Db}, _) -> {decided, false}; validate_dbname({fail, _Db}, _) -> throw(validate_dbname); validate_dbname({pass, _Db}, _) -> no_decision. -before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update); -before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit]; -before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit]. +before_doc_update({fail, _Doc}, _Db, interactive_edit) -> + throw(before_doc_update); +before_doc_update({true, Doc}, Db, interactive_edit) -> + [{true, [before_doc_update | Doc]}, Db, interactive_edit]; +before_doc_update({false, Doc}, Db, interactive_edit) -> + [{false, Doc}, Db, interactive_edit]. after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read); -after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db]; +after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read | Doc]}, Db]; after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db]. 
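The before_doc_update/3 and after_doc_read/2 hunks show erlfmt's treatment of multi-clause functions: one-line clauses survive only while every clause fits the width (after_doc_read/2 keeps its one-liners), but once any clause overflows, each clause body drops to its own line (before_doc_update/3 wraps all three, even the short throw clause). An illustration under that observed rule (`short/1` and `long/1` are made up):

```
-module(clause_demo).
-export([short/1, long/1]).

%% Every clause fits on its line, so the one-line form survives.
short(a) -> 1;
short(b) -> 2.

%% One clause overflows, so every clause body wraps.
long({fail, _Doc}) ->
    throw(fail_requested);
long({true, Doc}) ->
    [{true, [marker | Doc]}];
long({false, Doc}) ->
    [{false, Doc}].
```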
validate_docid({true, _Id}) -> true; @@ -82,7 +86,9 @@ callback_test_() -> { "callback tests", { - setup, fun setup/0, fun teardown/1, + setup, + fun setup/0, + fun teardown/1, [ {"validate_dbname_match", fun validate_dbname_match/0}, {"validate_dbname_no_match", fun validate_dbname_no_match/0}, @@ -112,59 +118,77 @@ callback_test_() -> } }. - validate_dbname_match() -> - ?assert(couch_db_plugin:validate_dbname( - {true, [db]}, db, fun(_, _) -> pass end)). + ?assert( + couch_db_plugin:validate_dbname( + {true, [db]}, db, fun(_, _) -> pass end + ) + ). validate_dbname_no_match() -> - ?assertNot(couch_db_plugin:validate_dbname( - {false, [db]}, db, fun(_, _) -> pass end)). + ?assertNot( + couch_db_plugin:validate_dbname( + {false, [db]}, db, fun(_, _) -> pass end + ) + ). validate_dbname_throw() -> ?assertThrow( validate_dbname, couch_db_plugin:validate_dbname( - {fail, [db]}, db, fun(_, _) -> pass end)). + {fail, [db]}, db, fun(_, _) -> pass end + ) + ). validate_dbname_pass() -> - ?assertEqual(pass, couch_db_plugin:validate_dbname( - {pass, [db]}, db, fun(_, _) -> pass end)). + ?assertEqual( + pass, + couch_db_plugin:validate_dbname( + {pass, [db]}, db, fun(_, _) -> pass end + ) + ). before_doc_update_match() -> ?assertMatch( {true, [before_doc_update, doc]}, couch_db_plugin:before_doc_update( - fake_db(), {true, [doc]}, interactive_edit)). + fake_db(), {true, [doc]}, interactive_edit + ) + ). before_doc_update_no_match() -> ?assertMatch( {false, [doc]}, couch_db_plugin:before_doc_update( - fake_db(), {false, [doc]}, interactive_edit)). + fake_db(), {false, [doc]}, interactive_edit + ) + ). before_doc_update_throw() -> ?assertThrow( before_doc_update, couch_db_plugin:before_doc_update( - fake_db(), {fail, [doc]}, interactive_edit)). - + fake_db(), {fail, [doc]}, interactive_edit + ) + ). after_doc_read_match() -> ?assertMatch( {true, [after_doc_read, doc]}, - couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})). + couch_db_plugin:after_doc_read(fake_db(), {true, [doc]}) + ). after_doc_read_no_match() -> ?assertMatch( {false, [doc]}, - couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})). + couch_db_plugin:after_doc_read(fake_db(), {false, [doc]}) + ). after_doc_read_throw() -> ?assertThrow( after_doc_read, - couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})). - + couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]}) + ). validate_docid_match() -> ?assert(couch_db_plugin:validate_docid({true, [doc]})). @@ -175,8 +199,8 @@ validate_docid_no_match() -> validate_docid_throw() -> ?assertThrow( validate_docid, - couch_db_plugin:validate_docid({fail, [doc]})). - + couch_db_plugin:validate_docid({fail, [doc]}) + ). check_is_admin_match() -> ?assert(couch_db_plugin:check_is_admin({true, [db]})). @@ -187,19 +211,23 @@ check_is_admin_no_match() -> check_is_admin_throw() -> ?assertThrow( check_is_admin, - couch_db_plugin:check_is_admin({fail, [db]})). + couch_db_plugin:check_is_admin({fail, [db]}) + ). on_delete_match() -> ?assertMatch( - [true], - couch_db_plugin:on_delete(true, [])). + [true], + couch_db_plugin:on_delete(true, []) + ). on_delete_no_match() -> ?assertMatch( - [false], - couch_db_plugin:on_delete(false, [])). + [false], + couch_db_plugin:on_delete(false, []) + ). on_delete_throw() -> ?assertThrow( on_delete, - couch_db_plugin:on_delete(fail, [])). + couch_db_plugin:on_delete(fail, []) + ). 
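The plugin callbacks above signal refusal by throwing atoms, and the tests pin that down with ?assertThrow/2, which evaluates its second argument inside a catch and matches the thrown term against the first. A minimal sketch (`maybe_fail/1` is made up):

```
-module(throw_demo_tests).
-include_lib("eunit/include/eunit.hrl").

maybe_fail(fail) -> throw(boom);
maybe_fail(X) -> X.

throw_demo_test() ->
    %% Passes only if the expression throws exactly `boom`.
    ?assertThrow(boom, maybe_fail(fail)),
    ?assertEqual(ok, maybe_fail(ok)).
```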
diff --git a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl index 40ad283cf..5ca658129 100644 --- a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl +++ b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl @@ -16,7 +16,6 @@ -include_lib("couch/include/couch_db.hrl"). -include_lib("couch_mrview/include/couch_mrview.hrl"). - setup() -> DbName = <<"test">>, DbFileName = "test.couch", @@ -30,12 +29,10 @@ setup() -> DbName. - teardown(DbName) when is_binary(DbName) -> couch_server:delete(DbName, [?ADMIN_CTX]), ok. - old_db_info_test_() -> { "Old database versions work", @@ -56,14 +53,12 @@ old_db_info_test_() -> } }. - can_get_props(DbName) -> ?_test(begin {ok, Db} = couch_db:open_int(DbName, []), Props = couch_db_engine:get_props(Db), ?assert(is_list(Props)) - end). - + end). can_get_db_info(DbName) -> ?_test(begin @@ -71,8 +66,7 @@ can_get_db_info(DbName) -> {ok, Info} = couch_db:get_db_info(Db), Props = couch_util:get_value(props, Info), ?assertEqual({[]}, Props) - end). - + end). can_compact_db(DbName) -> ?_test(begin diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl index b52184a8c..f0baa920b 100644 --- a/src/couch/test/eunit/couch_db_split_tests.erl +++ b/src/couch/test/eunit/couch_db_split_tests.erl @@ -16,8 +16,8 @@ -include_lib("couch/include/couch_db.hrl"). -define(RINGTOP, 2 bsl 31). --define(TIMEOUT, 60). % seconds - +% seconds +-define(TIMEOUT, 60). setup() -> DbName = ?tempdb(), @@ -25,14 +25,12 @@ setup() -> ok = couch_db:close(Db), DbName. - teardown(DbName) -> {ok, Db} = couch_db:open_int(DbName, []), FilePath = couch_db:get_filepath(Db), ok = couch_db:close(Db), ok = file:delete(FilePath). - split_test_() -> Cases = [ {"Should split an empty shard", 0, 2}, @@ -42,16 +40,19 @@ split_test_() -> ], { setup, - fun test_util:start_couch/0, fun test_util:stop/1, + fun test_util:start_couch/0, + fun test_util:stop/1, [ { foreachx, - fun(_) -> setup() end, fun(_, St) -> teardown(St) end, + fun(_) -> setup() end, + fun(_, St) -> teardown(St) end, [{Case, fun should_split_shard/2} || Case <- Cases] }, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_fail_on_missing_source/1, fun should_fail_on_existing_target/1, @@ -63,37 +64,43 @@ split_test_() -> ] }. - should_split_shard({Desc, TotalDocs, Q}, DbName) -> {ok, ExpectSeq} = create_docs(DbName, TotalDocs), Ranges = make_ranges(Q), TMap = make_targets(Ranges), DocsPerRange = TotalDocs div Q, PickFun = make_pickfun(DocsPerRange), - {Desc, timeout, ?TIMEOUT, ?_test(begin - {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), - ?assertEqual(ExpectSeq, UpdateSeq), - maps:map(fun(Range, Name) -> - {ok, Db} = couch_db:open_int(Name, []), - FilePath = couch_db:get_filepath(Db), - %% target actually exists - ?assertMatch({ok, _}, file:read_file_info(FilePath)), - %% target's update seq is the same as source's update seq - USeq = couch_db:get_update_seq(Db), - ?assertEqual(ExpectSeq, USeq), - %% target shard has all the expected in its range docs - {ok, DocsInShard} = couch_db:fold_docs(Db, fun(FDI, Acc) -> - DocId = FDI#full_doc_info.id, - ExpectedRange = PickFun(DocId, Ranges, undefined), - ?assertEqual(ExpectedRange, Range), - {ok, Acc + 1} - end, 0), - ?assertEqual(DocsPerRange, DocsInShard), - ok = couch_db:close(Db), - ok = file:delete(FilePath) - end, TMap) - end)}. 
- + {Desc, timeout, ?TIMEOUT, + ?_test(begin + {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), + ?assertEqual(ExpectSeq, UpdateSeq), + maps:map( + fun(Range, Name) -> + {ok, Db} = couch_db:open_int(Name, []), + FilePath = couch_db:get_filepath(Db), + %% target actually exists + ?assertMatch({ok, _}, file:read_file_info(FilePath)), + %% target's update seq is the same as source's update seq + USeq = couch_db:get_update_seq(Db), + ?assertEqual(ExpectSeq, USeq), + %% target shard has all the expected in its range docs + {ok, DocsInShard} = couch_db:fold_docs( + Db, + fun(FDI, Acc) -> + DocId = FDI#full_doc_info.id, + ExpectedRange = PickFun(DocId, Ranges, undefined), + ?assertEqual(ExpectedRange, Range), + {ok, Acc + 1} + end, + 0 + ), + ?assertEqual(DocsPerRange, DocsInShard), + ok = couch_db:close(Db), + ok = file:delete(FilePath) + end, + TMap + ) + end)}. should_fail_on_missing_source(_DbName) -> DbName = ?tempdb(), @@ -102,59 +109,67 @@ should_fail_on_missing_source(_DbName) -> Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3), ?_assertEqual({error, missing_source}, Response). - should_fail_on_existing_target(DbName) -> Ranges = make_ranges(2), - TMap = maps:map(fun(_, TName) -> - % We create the target but make sure to remove it from the cache so we - % hit the eexist error instaed of already_opened - {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]), - Pid = couch_db:get_pid(Db), - ok = couch_db:close(Db), - exit(Pid, kill), - test_util:wait(fun() -> - case ets:lookup(couch_server:couch_dbs(DbName), TName) of - [] -> ok; - [_ | _] -> wait - end - end), - TName - end, make_targets(Ranges)), + TMap = maps:map( + fun(_, TName) -> + % We create the target but make sure to remove it from the cache so we + % hit the eexist error instaed of already_opened + {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]), + Pid = couch_db:get_pid(Db), + ok = couch_db:close(Db), + exit(Pid, kill), + test_util:wait(fun() -> + case ets:lookup(couch_server:couch_dbs(DbName), TName) of + [] -> ok; + [_ | _] -> wait + end + end), + TName + end, + make_targets(Ranges) + ), Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3), ?_assertMatch({error, {target_create_error, _, eexist}}, Response). - should_fail_on_invalid_target_name(DbName) -> Ranges = make_ranges(2), - TMap = maps:map(fun([B, _], _) -> - iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)]) - end, make_targets(Ranges)), - Expect = {error, {target_create_error, <<"_$00000000">>, - {illegal_database_name, <<"_$00000000">>}}}, + TMap = maps:map( + fun([B, _], _) -> + iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)]) + end, + make_targets(Ranges) + ), + Expect = + {error, {target_create_error, <<"_$00000000">>, {illegal_database_name, <<"_$00000000">>}}}, Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3), ?_assertMatch(Expect, Response). - should_crash_on_invalid_tmap(DbName) -> Ranges = make_ranges(1), TMap = make_targets(Ranges), - ?_assertError(function_clause, - couch_db_split:split(DbName, TMap, fun fake_pickfun/3)). - + ?_assertError( + function_clause, + couch_db_split:split(DbName, TMap, fun fake_pickfun/3) + ). 
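should_fail_on_existing_target/1 above kills the target's db pid and then polls with test_util:wait/1 until the couch_server table drops the entry. A generic sketch of that poll-until-done loop (names and the boolean convention are illustrative; the real helper retries while its fun returns `wait`):

```
-module(wait_demo).
-export([wait_until/3]).

wait_until(_Fun, _Interval, Timeout) when Timeout =< 0 ->
    timeout;
wait_until(Fun, Interval, Timeout) ->
    case Fun() of
        true ->
            ok;
        false ->
            %% Condition not met yet: sleep and retry with less budget.
            timer:sleep(Interval),
            wait_until(Fun, Interval, Timeout - Interval)
    end.
```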
should_fail_on_opened_target(DbName) -> Ranges = make_ranges(2), - TMap = maps:map(fun(_, TName) -> - % We create and keep the target open but delete - % its file on disk so we don't fail with eexist - {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]), - FilePath = couch_db:get_filepath(Db), - ok = file:delete(FilePath), - TName - end, make_targets(Ranges)), - ?_assertMatch({error, {target_create_error, _, already_opened}}, - couch_db_split:split(DbName, TMap, fun fake_pickfun/3)). - + TMap = maps:map( + fun(_, TName) -> + % We create and keep the target open but delete + % its file on disk so we don't fail with eexist + {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]), + FilePath = couch_db:get_filepath(Db), + ok = file:delete(FilePath), + TName + end, + make_targets(Ranges) + ), + ?_assertMatch( + {error, {target_create_error, _, already_opened}}, + couch_db_split:split(DbName, TMap, fun fake_pickfun/3) + ). copy_local_docs_test_() -> Cases = [ @@ -165,46 +180,55 @@ copy_local_docs_test_() -> ], { setup, - fun test_util:start_couch/0, fun test_util:stop/1, + fun test_util:start_couch/0, + fun test_util:stop/1, [ { foreachx, - fun(_) -> setup() end, fun(_, St) -> teardown(St) end, + fun(_) -> setup() end, + fun(_, St) -> teardown(St) end, [{Case, fun should_copy_local_docs/2} || Case <- Cases] }, {"Should return error on missing source", - fun should_fail_copy_local_on_missing_source/0} + fun should_fail_copy_local_on_missing_source/0} ] }. - should_copy_local_docs({Desc, TotalDocs, Q}, DbName) -> {ok, ExpectSeq} = create_docs(DbName, TotalDocs), Ranges = make_ranges(Q), TMap = make_targets(Ranges), DocsPerRange = TotalDocs div Q, PickFun = make_pickfun(DocsPerRange), - {Desc, timeout, ?TIMEOUT, ?_test(begin - {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), - ?assertEqual(ExpectSeq, UpdateSeq), - Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun), - ?assertEqual(ok, Response), - maps:map(fun(Range, Name) -> - {ok, Db} = couch_db:open_int(Name, []), - FilePath = couch_db:get_filepath(Db), - %% target shard has all the expected in its range docs - {ok, DocsInShard} = couch_db:fold_local_docs(Db, fun(Doc, Acc) -> - DocId = Doc#doc.id, - ExpectedRange = PickFun(DocId, Ranges, undefined), - ?assertEqual(ExpectedRange, Range), - {ok, Acc + 1} - end, 0, []), - ?assertEqual(DocsPerRange, DocsInShard), - ok = couch_db:close(Db), - ok = file:delete(FilePath) - end, TMap) - end)}. - + {Desc, timeout, ?TIMEOUT, + ?_test(begin + {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), + ?assertEqual(ExpectSeq, UpdateSeq), + Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun), + ?assertEqual(ok, Response), + maps:map( + fun(Range, Name) -> + {ok, Db} = couch_db:open_int(Name, []), + FilePath = couch_db:get_filepath(Db), + %% target shard has all the expected in its range docs + {ok, DocsInShard} = couch_db:fold_local_docs( + Db, + fun(Doc, Acc) -> + DocId = Doc#doc.id, + ExpectedRange = PickFun(DocId, Ranges, undefined), + ?assertEqual(ExpectedRange, Range), + {ok, Acc + 1} + end, + 0, + [] + ), + ?assertEqual(DocsPerRange, DocsInShard), + ok = couch_db:close(Db), + ok = file:delete(FilePath) + end, + TMap + ) + end)}. should_fail_copy_local_on_missing_source() -> DbName = ?tempdb(), @@ -214,23 +238,23 @@ should_fail_copy_local_on_missing_source() -> Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun), ?assertEqual({error, missing_source}, Response). 
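Both split suites drive their cases through a foreachx fixture: each element is an `{X, Instantiator}` pair, the setup fun receives X, the teardown receives X plus the setup result, and the instantiator receives both. A minimal sketch:

```
-module(foreachx_demo_tests).
-include_lib("eunit/include/eunit.hrl").

foreachx_demo_test_() ->
    {
        foreachx,
        %% Setup gets the case term X and returns per-case state.
        fun(N) -> N * 2 end,
        %% Teardown gets X and the setup result.
        fun(_N, _St) -> ok end,
        [{N, fun should_double/2} || N <- [1, 2, 3]]
    }.

should_double(N, St) ->
    ?_assertEqual(N * 2, St).
```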
- cleanup_target_test_() -> { setup, - fun test_util:start_couch/0, fun test_util:stop/1, + fun test_util:start_couch/0, + fun test_util:stop/1, [ { setup, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, fun should_delete_existing_targets/1 }, {"Should return error on missing source", - fun should_fail_cleanup_target_on_missing_source/0} + fun should_fail_cleanup_target_on_missing_source/0} ] }. - should_delete_existing_targets(SourceName) -> {ok, ExpectSeq} = create_docs(SourceName, 100), Ranges = make_ranges(2), @@ -239,25 +263,26 @@ should_delete_existing_targets(SourceName) -> ?_test(begin {ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun), ?assertEqual(ExpectSeq, UpdateSeq), - maps:map(fun(_Range, TargetName) -> - FilePath = couch_util:with_db(TargetName, fun(Db) -> - couch_db:get_filepath(Db) - end), - ?assertMatch({ok, _}, file:read_file_info(FilePath)), - Response = couch_db_split:cleanup_target(SourceName, TargetName), - ?assertEqual(ok, Response), - ?assertEqual({error, enoent}, file:read_file_info(FilePath)) - end, TMap) + maps:map( + fun(_Range, TargetName) -> + FilePath = couch_util:with_db(TargetName, fun(Db) -> + couch_db:get_filepath(Db) + end), + ?assertMatch({ok, _}, file:read_file_info(FilePath)), + Response = couch_db_split:cleanup_target(SourceName, TargetName), + ?assertEqual(ok, Response), + ?assertEqual({error, enoent}, file:read_file_info(FilePath)) + end, + TMap + ) end). - should_fail_cleanup_target_on_missing_source() -> SourceName = ?tempdb(), TargetName = ?tempdb(), Response = couch_db_split:cleanup_target(SourceName, TargetName), ?assertEqual({error, missing_source}, Response). - make_pickfun(DocsPerRange) -> fun(DocId, Ranges, _HashFun) -> Id = docid_to_integer(DocId), @@ -269,41 +294,47 @@ make_pickfun(DocsPerRange) -> end end. - fake_pickfun(_, Ranges, _) -> hd(Ranges). - make_targets([]) -> maps:new(); -make_targets(Ranges) -> - Targets = lists:map(fun(Range) -> - {Range, ?tempdb()} - end, Ranges), +make_targets(Ranges) -> + Targets = lists:map( + fun(Range) -> + {Range, ?tempdb()} + end, + Ranges + ), maps:from_list(Targets). - make_ranges(Q) when Q > 0 -> Incr = (2 bsl 31) div Q, - lists:map(fun - (End) when End >= ?RINGTOP - 1 -> - [End - Incr, ?RINGTOP - 1]; - (End) -> - [End - Incr, End - 1] - end, lists:seq(Incr, ?RINGTOP, Incr)); + lists:map( + fun + (End) when End >= ?RINGTOP - 1 -> + [End - Incr, ?RINGTOP - 1]; + (End) -> + [End - Incr, End - 1] + end, + lists:seq(Incr, ?RINGTOP, Incr) + ); make_ranges(_) -> []. - create_docs(DbName, 0) -> couch_util:with_db(DbName, fun(Db) -> UpdateSeq = couch_db:get_update_seq(Db), {ok, UpdateSeq} end); create_docs(DbName, DocNum) -> - Docs = lists:foldl(fun(I, Acc) -> - [create_doc(I), create_local_doc(I) | Acc] - end, [], lists:seq(DocNum, 1, -1)), + Docs = lists:foldl( + fun(I, Acc) -> + [create_doc(I), create_local_doc(I) | Acc] + end, + [], + lists:seq(DocNum, 1, -1) + ), couch_util:with_db(DbName, fun(Db) -> {ok, _Result} = couch_db:update_docs(Db, Docs), {ok, Db1} = couch_db:reopen(Db), @@ -311,20 +342,16 @@ create_docs(DbName, DocNum) -> {ok, UpdateSeq} end). - create_doc(I) -> create_prefix_id_doc(I, ""). - create_local_doc(I) -> create_prefix_id_doc(I, "_local/"). - create_prefix_id_doc(I, Prefix) -> Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])), couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}). 
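make_ranges/1 above carves the hash ring [0, ?RINGTOP - 1] (with ?RINGTOP = 2 bsl 31, i.e. 2^32) into Q equal, contiguous slices, clamping only the last slice to the ring top. The helper reproduced stand-alone, with its Q = 2 result worked out in the trailing comment:

```
-module(ranges_demo).
-export([make_ranges/1]).

-define(RINGTOP, 2 bsl 31).

make_ranges(Q) when Q > 0 ->
    Incr = ?RINGTOP div Q,
    lists:map(
        fun
            (End) when End >= ?RINGTOP - 1 ->
                %% Last slice: clamp to the top of the ring.
                [End - Incr, ?RINGTOP - 1];
            (End) ->
                [End - Incr, End - 1]
        end,
        lists:seq(Incr, ?RINGTOP, Incr)
    ).

%% ranges_demo:make_ranges(2) =:=
%%     [[0, 2147483647], [2147483648, 4294967295]]
%% i.e. hex [00000000-7fffffff] and [80000000-ffffffff].
```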
- docid_to_integer(<<"_local/", DocId/binary>>) -> docid_to_integer(DocId); docid_to_integer(DocId) -> diff --git a/src/couch/test/eunit/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl index d52a15597..82137dc40 100644 --- a/src/couch/test/eunit/couch_db_tests.erl +++ b/src/couch/test/eunit/couch_db_tests.erl @@ -16,14 +16,13 @@ -define(TIMEOUT, 120). - - -create_delete_db_test_()-> +create_delete_db_test_() -> { "Database create/delete tests", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, fun() -> ?tempdb() end, @@ -35,12 +34,13 @@ create_delete_db_test_()-> } }. -create_delete_multiple_dbs_test_()-> +create_delete_multiple_dbs_test_() -> { "Multiple database create/delete tests", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end, @@ -57,7 +57,8 @@ create_delete_database_continuously_test_() -> "Continious database create/delete tests", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreachx, fun(_) -> ?tempdb() end, @@ -69,12 +70,13 @@ create_delete_database_continuously_test_() -> } }. -open_db_test_()-> +open_db_test_() -> { "Database open tests", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, fun() -> ?tempdb() end, @@ -87,7 +89,6 @@ open_db_test_()-> } }. - should_create_db(DbName) -> ?_test(begin {ok, Before} = couch_server:all_databases(), @@ -109,10 +110,12 @@ should_delete_db(DbName) -> should_create_multiple_dbs(DbNames) -> ?_test(begin - [gen_server:call(couch_server:couch_server(N), {set_max_dbs_open, 3}) || - N <- lists:seq(1, couch_server:num_servers())], + [ + gen_server:call(couch_server:couch_server(N), {set_max_dbs_open, 3}) + || N <- lists:seq(1, couch_server:num_servers()) + ], {ok, Before} = couch_server:all_databases(), - [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames], + [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames], [?assert(create_db(DbName)) || DbName <- DbNames], {ok, After} = couch_server:all_databases(), [?assert(lists:member(DbName, After)) || DbName <- DbNames] @@ -122,21 +125,27 @@ should_delete_multiple_dbs(DbNames) -> ?_test(begin [?assert(create_db(DbName)) || DbName <- DbNames], {ok, Before} = couch_server:all_databases(), - [?assert(lists:member(DbName, Before)) || DbName <- DbNames], + [?assert(lists:member(DbName, Before)) || DbName <- DbNames], [?assert(delete_db(DbName)) || DbName <- DbNames], {ok, After} = couch_server:all_databases(), [?assertNot(lists:member(DbName, After)) || DbName <- DbNames] end). should_create_delete_database_continuously(Times, DbName) -> - {lists:flatten(io_lib:format("~b times", [Times])), - {timeout, ?TIMEOUT, ?_test(begin - ?assert(create_db(DbName)), - lists:foreach(fun(_) -> - ?assert(delete_db(DbName)), - ?assert(create_db(DbName)) - end, lists:seq(1, Times)) - end)}}. + { + lists:flatten(io_lib:format("~b times", [Times])), + {timeout, ?TIMEOUT, + ?_test(begin + ?assert(create_db(DbName)), + lists:foreach( + fun(_) -> + ?assert(delete_db(DbName)), + ?assert(create_db(DbName)) + end, + lists:seq(1, Times) + ) + end)} + }. 
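The continuous create/delete case wraps its body in `{timeout, ?TIMEOUT, Test}`; eunit's default per-test limit is 5 seconds, far too tight for a loop of repeated database creates. A sketch of the wrapper:

```
-module(timeout_demo_tests).
-include_lib("eunit/include/eunit.hrl").

slow_test_() ->
    %% Raise this test's limit to 120 seconds.
    {timeout, 120,
        ?_test(begin
            timer:sleep(100),
            ?assert(true)
        end)}.
```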
should_create_db_if_missing(DbName) -> ?_test(begin @@ -165,8 +174,10 @@ locking_should_work(DbName) -> ?assertEqual({error, {locked, <<"x">>}}, couch_db:create(DbName, [])), ?assertEqual(ok, couch_server:unlock(DbName)), {ok, Db} = couch_db:create(DbName, []), - ?assertEqual({error, already_opened}, - couch_server:lock(DbName, <<>>)), + ?assertEqual( + {error, already_opened}, + couch_server:lock(DbName, <<>>) + ), ok = couch_db:close(Db), catch exit(couch_db:get_pid(Db), kill), @@ -175,11 +186,13 @@ locking_should_work(DbName) -> [] -> ok; [_ | _] -> wait end - end), + end), ?assertEqual(ok, couch_server:lock(DbName, <<"y">>)), - ?assertEqual({error, {locked, <<"y">>}}, - couch_db:open(DbName, [])), + ?assertEqual( + {error, {locked, <<"y">>}}, + couch_db:open(DbName, []) + ), couch_server:unlock(DbName), {ok, Db1} = couch_db:open(DbName, [{create_if_missing, true}]), diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl index a68932eca..a004ed8fd 100644 --- a/src/couch/test/eunit/couch_doc_json_tests.erl +++ b/src/couch/test/eunit/couch_doc_json_tests.erl @@ -15,7 +15,6 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). - setup() -> mock(couch_log), mock(config), @@ -38,27 +37,32 @@ mock(couch_log) -> ok; mock(config) -> meck:new(config, [passthrough]), - meck:expect(config, get_integer, - fun("couchdb", "max_document_size", 8000000) -> 1024 end), + meck:expect( + config, + get_integer, + fun("couchdb", "max_document_size", 8000000) -> 1024 end + ), meck:expect(config, get, fun(_, _) -> undefined end), meck:expect(config, get, fun(_, _, Default) -> Default end), ok. - json_doc_test_() -> { setup, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, fun(_) -> - [{"Document from JSON", [ - from_json_with_dbname_error_cases(), - from_json_with_db_name_success_cases(), - from_json_success_cases(), - from_json_error_cases() - ]}, - {"Document to JSON", [ - to_json_success_cases() - ]}] + [ + {"Document from JSON", [ + from_json_with_dbname_error_cases(), + from_json_with_db_name_success_cases(), + from_json_success_cases(), + from_json_error_cases() + ]}, + {"Document to JSON", [ + to_json_success_cases() + ]} + ] end }. @@ -95,35 +99,42 @@ from_json_success_cases() -> "Non underscore prefixed fields stored in body." 
}, { - {[{<<"_attachments">>, {[ - {<<"my_attachment.fu">>, {[ - {<<"stub">>, true}, - {<<"content_type">>, <<"application/awesome">>}, - {<<"length">>, 45} - ]}}, - {<<"noahs_private_key.gpg">>, {[ - {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>}, - {<<"content_type">>, <<"application/pgp-signature">>} - ]}} - ]}}]}, - #doc{atts = [ - couch_att:new([ - {name, <<"my_attachment.fu">>}, - {data, stub}, - {type, <<"application/awesome">>}, - {att_len, 45}, - {disk_len, 45}, - {revpos, undefined} - ]), - couch_att:new([ - {name, <<"noahs_private_key.gpg">>}, - {data, <<"I have a pet fish!">>}, - {type, <<"application/pgp-signature">>}, - {att_len, 18}, - {disk_len, 18}, - {revpos, 0} - ]) + {[ + {<<"_attachments">>, + {[ + {<<"my_attachment.fu">>, + {[ + {<<"stub">>, true}, + {<<"content_type">>, <<"application/awesome">>}, + {<<"length">>, 45} + ]}}, + {<<"noahs_private_key.gpg">>, + {[ + {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>}, + {<<"content_type">>, <<"application/pgp-signature">>} + ]}} + ]}} ]}, + #doc{ + atts = [ + couch_att:new([ + {name, <<"my_attachment.fu">>}, + {data, stub}, + {type, <<"application/awesome">>}, + {att_len, 45}, + {disk_len, 45}, + {revpos, undefined} + ]), + couch_att:new([ + {name, <<"noahs_private_key.gpg">>}, + {data, <<"I have a pet fish!">>}, + {type, <<"application/pgp-signature">>}, + {att_len, 18}, + {disk_len, 18}, + {revpos, 0} + ]) + ] + }, "Attachments are parsed correctly." }, { @@ -138,11 +149,13 @@ from_json_success_cases() -> }, { {[ - {<<"_revisions">>, - {[{<<"start">>, 4}, - {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}}, - {<<"_rev">>, <<"6-something">>} - ]}, + {<<"_revisions">>, + {[ + {<<"start">>, 4}, + {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]} + ]}}, + {<<"_rev">>, <<"6-something">>} + ]}, #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}}, "_revisions attribute are preferred to _rev." }, @@ -171,7 +184,8 @@ from_json_success_cases() -> fun({EJson, Expect, Msg}) -> {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson))} end, - Cases). + Cases + ). from_json_with_db_name_success_cases() -> Cases = [ @@ -210,7 +224,8 @@ from_json_with_db_name_success_cases() -> fun({EJson, DbName, Expect, Msg}) -> {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))} end, - Cases). + Cases + ). from_json_error_cases() -> Cases = [ @@ -236,8 +251,7 @@ from_json_error_cases() -> }, { {[{<<"_id">>, <<"_random">>}]}, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, + {illegal_docid, <<"Only reserved document ids may start with underscore.">>}, "Disallow arbitrary underscore prefixed docids." }, { @@ -270,8 +284,13 @@ from_json_error_cases() -> "Revision ids must be strings." }, { - {[{<<"_revisions">>, {[{<<"start">>, 0}, - {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}]}}]}, + {[ + {<<"_revisions">>, + {[ + {<<"start">>, 0}, + {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]} + ]}} + ]}, {doc_validation, "RevId isn't a valid hexadecimal"}, "Revision ids must be a valid hex." 
}, @@ -284,7 +303,7 @@ from_json_error_cases() -> fun() -> {[ {<<"_id">>, <<"large_doc">>}, - {<<"x">> , << <<"x">> || _ <- lists:seq(1,1025) >>} + {<<"x">>, <<<<"x">> || _ <- lists:seq(1, 1025)>>} ]} end, {request_entity_too_large, <<"large_doc">>}, @@ -292,39 +311,36 @@ from_json_error_cases() -> } ], - lists:map(fun - ({Fun, Expect, Msg}) when is_function(Fun, 0) -> - {Msg, - ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))}; - ({EJson, Expect, Msg}) -> - {Msg, - ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))}; - ({EJson, Msg}) -> - {Msg, - ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))} - end, Cases). + lists:map( + fun + ({Fun, Expect, Msg}) when is_function(Fun, 0) -> + {Msg, ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))}; + ({EJson, Expect, Msg}) -> + {Msg, ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))}; + ({EJson, Msg}) -> + {Msg, ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))} + end, + Cases + ). from_json_with_dbname_error_cases() -> Cases = [ { {[{<<"_id">>, <<"_random">>}]}, <<"_dbs">>, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, + {illegal_docid, <<"Only reserved document ids may start with underscore.">>}, "Disallow non-system-DB underscore prefixed docids in _dbs database." }, { {[{<<"_id">>, <<"_random">>}]}, <<"foobar">>, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, + {illegal_docid, <<"Only reserved document ids may start with underscore.">>}, "Disallow arbitrary underscore prefixed docids in regular database." }, { {[{<<"_id">>, <<"_users">>}]}, <<"foobar">>, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, + {illegal_docid, <<"Only reserved document ids may start with underscore.">>}, "Disallow system-DB docid _users in regular database." } ], @@ -334,7 +350,8 @@ from_json_with_dbname_error_cases() -> Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)), {Msg, ?_assertMatch(Expect, Error)} end, - Cases). + Cases + ). to_json_success_cases() -> Cases = [ @@ -357,13 +374,14 @@ to_json_success_cases() -> [revs], #doc{revs = {5, [<<"first">>, <<"second">>]}}, {[ - {<<"_id">>, <<>>}, - {<<"_rev">>, <<"5-first">>}, - {<<"_revisions">>, {[ - {<<"start">>, 5}, - {<<"ids">>, [<<"first">>, <<"second">>]} - ]}} - ]}, + {<<"_id">>, <<>>}, + {<<"_rev">>, <<"5-first">>}, + {<<"_revisions">>, + {[ + {<<"start">>, 5}, + {<<"ids">>, [<<"first">>, <<"second">>]} + ]}} + ]}, "_revisions include with revs option" }, { @@ -377,16 +395,18 @@ to_json_success_cases() -> "Deleted docs no longer drop body members." }, { - #doc{meta = [ - {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]} - ]}, + #doc{ + meta = [ + {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]} + ] + }, {[ - {<<"_id">>, <<>>}, - {<<"_revs_info">>, [ - {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]}, - {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]} - ]} - ]}, + {<<"_id">>, <<>>}, + {<<"_revs_info">>, [ + {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]}, + {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]} + ]} + ]}, "_revs_info field is added correctly." 
}, { @@ -405,89 +425,102 @@ to_json_success_cases() -> { #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]}, {[ - {<<"_id">>, <<>>}, - {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]} - ]}, + {<<"_id">>, <<>>}, + {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]} + ]}, "_deleted_conflicsts is added as an array of strings." }, { - #doc{atts = [ - couch_att:new([ - {name, <<"big.xml">>}, - {type, <<"xml/sucks">>}, - {data, fun() -> ok end}, - {revpos, 1}, - {att_len, 400}, - {disk_len, 400} - ]), - couch_att:new([ - {name, <<"fast.json">>}, - {type, <<"json/ftw">>}, - {data, <<"{\"so\": \"there!\"}">>}, - {revpos, 1}, - {att_len, 16}, - {disk_len, 16} - ]) - ]}, + #doc{ + atts = [ + couch_att:new([ + {name, <<"big.xml">>}, + {type, <<"xml/sucks">>}, + {data, fun() -> ok end}, + {revpos, 1}, + {att_len, 400}, + {disk_len, 400} + ]), + couch_att:new([ + {name, <<"fast.json">>}, + {type, <<"json/ftw">>}, + {data, <<"{\"so\": \"there!\"}">>}, + {revpos, 1}, + {att_len, 16}, + {disk_len, 16} + ]) + ] + }, {[ - {<<"_id">>, <<>>}, - {<<"_attachments">>, {[ - {<<"big.xml">>, {[ - {<<"content_type">>, <<"xml/sucks">>}, - {<<"revpos">>, 1}, - {<<"length">>, 400}, - {<<"stub">>, true} - ]}}, - {<<"fast.json">>, {[ - {<<"content_type">>, <<"json/ftw">>}, - {<<"revpos">>, 1}, - {<<"length">>, 16}, - {<<"stub">>, true} - ]}} - ]}} + {<<"_id">>, <<>>}, + {<<"_attachments">>, + {[ + {<<"big.xml">>, + {[ + {<<"content_type">>, <<"xml/sucks">>}, + {<<"revpos">>, 1}, + {<<"length">>, 400}, + {<<"stub">>, true} + ]}}, + {<<"fast.json">>, + {[ + {<<"content_type">>, <<"json/ftw">>}, + {<<"revpos">>, 1}, + {<<"length">>, 16}, + {<<"stub">>, true} + ]}} + ]}} ]}, "Attachments attached as stubs only include a length." }, { [attachments], - #doc{atts = [ - couch_att:new([ - {name, <<"stuff.txt">>}, - {type, <<"text/plain">>}, - {data, fun() -> <<"diet pepsi">> end}, - {revpos, 1}, - {att_len, 10}, - {disk_len, 10} - ]), - couch_att:new([ - {name, <<"food.now">>}, - {type, <<"application/food">>}, - {revpos, 1}, - {data, <<"sammich">>} - ]) - ]}, + #doc{ + atts = [ + couch_att:new([ + {name, <<"stuff.txt">>}, + {type, <<"text/plain">>}, + {data, fun() -> <<"diet pepsi">> end}, + {revpos, 1}, + {att_len, 10}, + {disk_len, 10} + ]), + couch_att:new([ + {name, <<"food.now">>}, + {type, <<"application/food">>}, + {revpos, 1}, + {data, <<"sammich">>} + ]) + ] + }, {[ {<<"_id">>, <<>>}, - {<<"_attachments">>, {[ - {<<"stuff.txt">>, {[ - {<<"content_type">>, <<"text/plain">>}, - {<<"revpos">>, 1}, - {<<"data">>, <<"ZGlldCBwZXBzaQ==">>} - ]}}, - {<<"food.now">>, {[ - {<<"content_type">>, <<"application/food">>}, - {<<"revpos">>, 1}, - {<<"data">>, <<"c2FtbWljaA==">>} - ]}} - ]}} + {<<"_attachments">>, + {[ + {<<"stuff.txt">>, + {[ + {<<"content_type">>, <<"text/plain">>}, + {<<"revpos">>, 1}, + {<<"data">>, <<"ZGlldCBwZXBzaQ==">>} + ]}}, + {<<"food.now">>, + {[ + {<<"content_type">>, <<"application/food">>}, + {<<"revpos">>, 1}, + {<<"data">>, <<"c2FtbWljaA==">>} + ]}} + ]}} ]}, "Attachments included inline with attachments option." } ], - lists:map(fun - ({Doc, EJson, Msg}) -> - {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))}; - ({Options, Doc, EJson, Msg}) -> - {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))} - end, Cases). + lists:map( + fun + ({Doc, EJson, Msg}) -> + {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))}; + ({Options, Doc, EJson, Msg}) -> + {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))} + end, + Cases + ). 
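couch_doc_json_tests builds its suites data-first: a list of {Input, Expected, Description} tuples mapped into named test objects, so a failing case reports its own description. The same pattern in miniature:

```
-module(cases_demo_tests).
-include_lib("eunit/include/eunit.hrl").

square_test_() ->
    Cases = [
        {0, 0, "zero squares to zero"},
        {2, 4, "positive input"},
        {-3, 9, "negative input"}
    ],
    lists:map(
        fun({In, Out, Msg}) ->
            %% {Description, Test} pairs label each generated test.
            {Msg, ?_assertEqual(Out, In * In)}
        end,
        Cases
    ).
```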
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl index fc63d3f30..5a6e11ab2 100644 --- a/src/couch/test/eunit/couch_doc_tests.erl +++ b/src/couch/test/eunit/couch_doc_tests.erl @@ -15,9 +15,9 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). - -define(REQUEST_FIXTURE, - filename:join([?FIXTURESDIR, "multipart.http"])). + filename:join([?FIXTURESDIR, "multipart.http"]) +). parse_rev_test() -> ?assertEqual({1, <<"123">>}, couch_doc:parse_rev("1-123")), @@ -40,24 +40,30 @@ doc_to_multi_part_stream_test() -> JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>, AttData = <<"Hello my important document">>, AttLength = size(AttData), - Atts = [couch_att:new([ - {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>}, - {att_len, AttLength}, {disk_len, AttLength}])], + Atts = [ + couch_att:new([ + {name, <<"test">>}, + {data, AttData}, + {type, <<"text/plain">>}, + {att_len, AttLength}, + {disk_len, AttLength} + ]) + ], couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true), AttLengthStr = integer_to_binary(AttLength), BoundaryLen = size(Boundary), [ - <<"--", Boundary/binary>>, - <<"Content-Type: application/json">>, - <<>>, - JsonBytes, - <<"--", Boundary/binary>>, - <<"Content-Disposition: attachment; filename=\"test\"">>, - <<"Content-Type: text/plain">>, - <<"Content-Length: ", AttLengthStr/binary>>, - <<>>, - AttData, - <<"--", Boundary:BoundaryLen/binary, "--">> + <<"--", Boundary/binary>>, + <<"Content-Type: application/json">>, + <<>>, + JsonBytes, + <<"--", Boundary/binary>>, + <<"Content-Disposition: attachment; filename=\"test\"">>, + <<"Content-Type: text/plain">>, + <<"Content-Length: ", AttLengthStr/binary>>, + <<>>, + AttData, + <<"--", Boundary:BoundaryLen/binary, "--">> ] = collected(), ok. @@ -67,10 +73,17 @@ len_doc_to_multi_part_stream_test() -> ContentType = <<"multipart/related; boundary=\"", Boundary/binary, "\"">>, AttData = <<"Hello my important document">>, AttLength = size(AttData), - Atts = [couch_att:new([ - {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>}, - {att_len, AttLength}, {disk_len, AttLength}])], - {ContentType, 258} = %% 258 is expected size of the document + Atts = [ + couch_att:new([ + {name, <<"test">>}, + {data, AttData}, + {type, <<"text/plain">>}, + {att_len, AttLength}, + {disk_len, AttLength} + ]) + ], + %% 258 is expected size of the document + {ContentType, 258} = couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true), ok. 
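doc_to_multi_part_stream_test/0 above checks the multipart framing by matching binaries structurally; the closing delimiter needs an explicit segment size (`Boundary:BoundaryLen/binary`) because an unsized binary segment may only appear last in a pattern. A sketch of the same match (`is_closing/2` is illustrative):

```
-module(binmatch_demo).
-export([is_closing/2]).

is_closing(Boundary, Line) ->
    Len = byte_size(Boundary),
    case Line of
        %% The sized segment lets the literal "--" follow a
        %% variable-length binary in the pattern.
        <<"--", B:Len/binary, "--">> when B =:= Boundary -> true;
        _ -> false
    end.

%% binmatch_demo:is_closing(<<"bound">>, <<"--bound--">>) =:= true.
```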
@@ -93,29 +106,46 @@ validate_docid_test_() -> ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)), ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)), ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<>>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<16#80>>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_idx">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_design/">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_local/">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(large_id(1025))), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_users">>, <<"foo">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>)) - ] - }. + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<>>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<16#80>>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<"_idx">>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<"_">>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<"_design/">>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<"_local/">>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(large_id(1025)) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<"_users">>, <<"foo">>) + ), + ?_assertThrow( + {illegal_docid, _}, + couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>) + ) + ]}. large_id(N) -> - << <<"x">> || _ <- lists:seq(1, N) >>. + <<<<"x">> || _ <- lists:seq(1, N)>>. request(start) -> {ok, Doc} = file:read_file(?REQUEST_FIXTURE), @@ -128,7 +158,7 @@ send(Data) -> send(Data, undefined) -> send(Data, []); send(Data, Acc) -> - put(data, [Acc|Data]). + put(data, [Acc | Data]). collected() -> B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]), @@ -136,11 +166,14 @@ collected() -> mock_config() -> ok = meck:new(config, [passthrough]), - meck:expect(config, get, - fun("couchdb", "max_document_id_length", "infinity") -> "1024"; - ("couchdb", "max_attachment_size", "infinity") -> "infinity"; - ("couchdb", "max_attachment_size", 1073741824) -> 1073741824; - ("mem3", "shards_db", "_dbs") -> "_dbs"; + meck:expect( + config, + get, + fun + ("couchdb", "max_document_id_length", "infinity") -> "1024"; + ("couchdb", "max_attachment_size", "infinity") -> "infinity"; + ("couchdb", "max_attachment_size", 1073741824) -> 1073741824; + ("mem3", "shards_db", "_dbs") -> "_dbs"; (Key, Val, Default) -> meck:passthrough([Key, Val, Default]) end ). diff --git a/src/couch/test/eunit/couch_ejson_compare_tests.erl b/src/couch/test/eunit/couch_ejson_compare_tests.erl index 1dfbad4ed..f74e40b23 100644 --- a/src/couch/test/eunit/couch_ejson_compare_tests.erl +++ b/src/couch/test/eunit/couch_ejson_compare_tests.erl @@ -12,8 +12,7 @@ -module(couch_ejson_compare_tests). - --define(MAX_UNICODE_STRING, <<255,255,255,255>>). +-define(MAX_UNICODE_STRING, <<255, 255, 255, 255>>). % See mango_idx_view.hrl -define(MAX_JSON_OBJ, {?MAX_UNICODE_STRING}). @@ -52,40 +51,39 @@ {[{<<"b">>, 2}, {<<"c">>, 2}]} ]). - % Propery tests -ifdef(WITH_PROPER). -include_lib("couch/include/couch_eunit_proper.hrl"). 
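The `large_id/1` change above illustrates another mechanical rule: erlfmt drops the padding inside binary-comprehension delimiters, turning `<< <<"x">> || ... >>` into `<<<<"x">> || ...>>`. A small sketch of the same construct (module and function invented here):

```
-module(erlfmt_bincomp_sketch).
-export([repeat/2]).

%% Build a binary of N copies of Byte; the comprehension is written
%% with no spaces inside the << >> delimiters, matching erlfmt output.
repeat(Byte, N) when is_integer(Byte), N >= 0 ->
    <<<<Byte>> || _ <- lists:seq(1, N)>>.
```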
- property_test_() -> ?EUNIT_QUICKCHECK(60, 400). - % Properties % The main, nif-based comparison, sorts the test values correctly prop_nif_sorts_correctly() -> Positions = get_positions(?TEST_VALUES), - ?FORALL(A, oneof(?TEST_VALUES), + ?FORALL( + A, + oneof(?TEST_VALUES), ?FORALL(B, oneof(?TEST_VALUES), begin expected_less(A, B, Positions) =:= less_nif(A, B) end) ). - % The erlang fallback comparison sorts the test values correctly prop_erlang_sorts_correctly() -> Positions = get_positions(?TEST_VALUES), - ?FORALL(A, oneof(?TEST_VALUES), + ?FORALL( + A, + oneof(?TEST_VALUES), ?FORALL(B, oneof(?TEST_VALUES), begin expected_less(A, B, Positions) =:= less_erl(A, B) end) ). - % Zero width unicode chars are ignored prop_equivalent_unicode_values() -> ?FORALL({Prefix, Suffix}, {zero_width_list(), zero_width_list()}, begin @@ -93,36 +91,33 @@ prop_equivalent_unicode_values() -> less(<<"a">>, Binary) =:= 0 end). - % Every test value sorts less than the special ?MAX_JSON_OBJ prop_test_values_are_less_than_max_json() -> ?FORALL(V, oneof(?TEST_VALUES), begin less(V, ?MAX_JSON_OBJ) =:= -1 end). - % Any json value sorts less than the special ?MAX_JSON_OBJ prop_any_json_is_less_than_max_json() -> ?FORALL(V, json(), begin less(V, ?MAX_JSON_OBJ) =:= -1 end). - % In general, for any json, the nif collator matches the erlang collator prop_nif_matches_erlang() -> - ?FORALL(A, json(), + ?FORALL( + A, + json(), ?FORALL(B, json(), begin less_nif(A, B) =:= less_erl(A, B) end) ). - % Generators json() -> ?SIZED(Size, json(Size)). - json(0) -> oneof([ null, @@ -133,7 +128,6 @@ json(0) -> [], {[]} ]); - json(Size) -> frequency([ {1, null}, @@ -147,40 +141,30 @@ json(Size) -> {5, ?LAZY(json_object(Size))} ]). - json_number() -> oneof([largeint(), int(), real()]). - json_string() -> utf8(). - json_array(0) -> []; - json_array(Size) -> vector(Size div 2, json(Size div 2)). - json_object(0) -> {[]}; - json_object(Size) -> {vector(Size div 2, {json_string(), json(Size div 2)})}. - zero_width_list() -> ?SIZED(Size, vector(Size, zero_width_chars())). - zero_width_chars() -> oneof([16#200B, 16#200C, 16#200D]). - -endif. - % Regular EUnit tests get_icu_version_test() -> @@ -192,7 +176,6 @@ get_icu_version_test() -> ?assert(is_integer(V3) andalso V3 >= 0), ?assert(is_integer(V4) andalso V4 >= 0). - get_uca_version_test() -> Ver = couch_ejson_compare:get_uca_version(), ?assertMatch({_, _, _, _}, Ver), @@ -202,7 +185,6 @@ get_uca_version_test() -> ?assert(is_integer(V3) andalso V3 >= 0), ?assert(is_integer(V4) andalso V4 >= 0). - max_depth_error_list_test() -> % NIF can handle terms with depth <= 9 Nested9 = nest_list(<<"val">>, 9), @@ -215,7 +197,6 @@ max_depth_error_list_test() -> % Then it should transparently jump to erlang land ?assertEqual(0, less(Nested10, Nested10)). - max_depth_error_obj_test() -> % NIF can handle terms with depth <= 9 Nested9 = nest_obj(<<"k">>, <<"v">>, 9), @@ -228,13 +209,12 @@ max_depth_error_obj_test() -> % Then it should transparently jump to erlang land ?assertEqual(0, less(Nested10, Nested10)). 
- compare_strings_nif_test() -> ?assertEqual(-1, compare_strings(<<"a">>, <<"b">>)), ?assertEqual(0, compare_strings(<<"a">>, <<"a">>)), ?assertEqual(1, compare_strings(<<"b">>, <<"a">>)), - LargeBin1 = << <<"x">> || _ <- lists:seq(1, 1000000)>>, + LargeBin1 = <<<<"x">> || _ <- lists:seq(1, 1000000)>>, LargeBin2 = <<LargeBin1/binary, "x">>, ?assertEqual(-1, compare_strings(LargeBin1, LargeBin2)), ?assertEqual(1, compare_strings(LargeBin2, LargeBin1)), @@ -244,47 +224,41 @@ compare_strings_nif_test() -> ?assertError(badarg, compare_strings(<<"a">>, 42)), ?assertError(badarg, compare_strings(42, 42)). - % Helper functions less(A, B) -> cmp_norm(couch_ejson_compare:less(A, B)). - less_nif(A, B) -> cmp_norm(couch_ejson_compare:less_nif(A, B)). - less_erl(A, B) -> cmp_norm(couch_ejson_compare:less_erl(A, B)). - compare_strings(A, B) -> couch_ejson_compare:compare_strings_nif(A, B). - nest_list(Val, 0) -> Val; - nest_list(Val, Depth) when is_integer(Depth), Depth > 0 -> [nest_list(Val, Depth - 1)]. - nest_obj(K, V, 1) -> {[{K, V}]}; - nest_obj(K, V, Depth) when is_integer(Depth), Depth > 1 -> {[{K, nest_obj(K, V, Depth - 1)}]}. - % Build a map of #{Val => PositionIndex} for the test values so that when any % two are compared we can verify their position in the test list matches the % compared result get_positions(TestValues) -> - lists:foldl(fun(Val, Acc) -> - Acc#{Val => map_size(Acc)} - end, #{}, TestValues). - + lists:foldl( + fun(Val, Acc) -> + Acc#{Val => map_size(Acc)} + end, + #{}, + TestValues + ). % When two values are compared, check the test values positions index to ensure % the order in the test value list matches the comparison result diff --git a/src/couch/test/eunit/couch_ejson_size_tests.erl b/src/couch/test/eunit/couch_ejson_size_tests.erl index df9168ed1..27803d8b7 100644 --- a/src/couch/test/eunit/couch_ejson_size_tests.erl +++ b/src/couch/test/eunit/couch_ejson_size_tests.erl @@ -14,59 +14,86 @@ -include_lib("eunit/include/eunit.hrl"). --define(HWAIR, $\x{10348}). % 4 byte utf8 encoding --define(EURO, $\x{20ac}). % 3 byte utf8 encoding --define(CENT, $\x{a2}). % 2 byte utf8 encoding - +% 4 byte utf8 encoding +-define(HWAIR, $\x{10348}). +% 3 byte utf8 encoding +-define(EURO, $\x{20ac}). +% 2 byte utf8 encoding +-define(CENT, $\x{a2}). ejson_size_test_() -> - [?_assertEqual(R, couch_ejson_size:encoded_size(Input)) || {R, Input} <- [ - {1, 1}, {1, 1}, {2, -1}, {1, 9}, {2, 10}, {3, -10}, - {2, 11}, {2, 99}, {3, 100}, {3, 999}, {4, 1000}, {4, 9999}, - {5, 10000}, - - {3, 0.0}, {3, 0.1}, {3, 1.0}, {4, -1.0}, {3, 1.0e9}, - {4, 1.0e10}, {5, 1.0e-10}, {5, 1.0e-99}, {6, 1.0e-100}, {3, 1.0e-323}, - - {2, arr_nested(0)}, {22, arr_nested(10)}, {2002, arr_nested(1000)}, - {9, obj_nested(0)}, {69, obj_nested(10)}, {6009, obj_nested(1000)}, - - {4, null}, {4, true}, {5, false}, - - {3, str(1, $x)}, {4, str(1, ?CENT)}, {5, str(1, ?EURO)}, - {6, str(1, ?HWAIR)}, {3, str(1, $\x{1})}, {12, str(10, $x)}, - {22, str(10, ?CENT)}, {32, str(10, ?EURO)}, {42, str(10, ?HWAIR)}, - {12, str(10, $\x{1})} - ]]. 
- + [ + ?_assertEqual(R, couch_ejson_size:encoded_size(Input)) + || {R, Input} <- [ + {1, 1}, + {1, 1}, + {2, -1}, + {1, 9}, + {2, 10}, + {3, -10}, + {2, 11}, + {2, 99}, + {3, 100}, + {3, 999}, + {4, 1000}, + {4, 9999}, + {5, 10000}, + + {3, 0.0}, + {3, 0.1}, + {3, 1.0}, + {4, -1.0}, + {3, 1.0e9}, + {4, 1.0e10}, + {5, 1.0e-10}, + {5, 1.0e-99}, + {6, 1.0e-100}, + {3, 1.0e-323}, + + {2, arr_nested(0)}, + {22, arr_nested(10)}, + {2002, arr_nested(1000)}, + {9, obj_nested(0)}, + {69, obj_nested(10)}, + {6009, obj_nested(1000)}, + + {4, null}, + {4, true}, + {5, false}, + + {3, str(1, $x)}, + {4, str(1, ?CENT)}, + {5, str(1, ?EURO)}, + {6, str(1, ?HWAIR)}, + {3, str(1, $\x{1})}, + {12, str(10, $x)}, + {22, str(10, ?CENT)}, + {32, str(10, ?EURO)}, + {42, str(10, ?HWAIR)}, + {12, str(10, $\x{1})} + ] + ]. %% Helper functions arr_nested(MaxDepth) -> arr_nested(MaxDepth, 0). - obj_nested(MaxDepth) -> obj_nested(MaxDepth, 0). - obj(N, K, V) -> {[{K, V} || _ <- lists:seq(1, N)]}. - str(N, C) -> unicode:characters_to_binary([C || _ <- lists:seq(1, N)]). - arr_nested(MaxDepth, MaxDepth) -> []; - arr_nested(MaxDepth, Depth) -> [arr_nested(MaxDepth, Depth + 1)]. - obj_nested(MaxDepth, MaxDepth) -> obj(1, <<"k">>, <<"v">>); - obj_nested(MaxDepth, Depth) -> {[{<<"k">>, obj_nested(MaxDepth, Depth + 1)}]}. diff --git a/src/couch/test/eunit/couch_etag_tests.erl b/src/couch/test/eunit/couch_etag_tests.erl index 9d15e483f..72db6008a 100644 --- a/src/couch/test/eunit/couch_etag_tests.erl +++ b/src/couch/test/eunit/couch_etag_tests.erl @@ -18,13 +18,14 @@ local_with_empty_body_test() -> Etag = couch_httpd:doc_etag(<<"_local/local-and-empty">>, {[]}, {0, <<"1">>}), ?assertEqual(Etag, <<"\"5ZVXQYO7VLEOU0TL9VXDNP5PV\"">>). - local_with_body_test() -> - DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]}, + DocBody = {[{<<"hello">>, <<"world">>}, {<<"relax">>, true}]}, Etag = couch_httpd:doc_etag(<<"_local/local-with-body">>, DocBody, {0, <<"1">>}), ?assertEqual(Etag, <<"\"CEFXP6WH8OKYIWO1GLGBHKCCA\"">>). normal_doc_uses_rev_test() -> - DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]}, - Etag = couch_httpd:doc_etag(<<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}), + DocBody = {[{<<"hello">>, <<"world">>}, {<<"relax">>, true}]}, + Etag = couch_httpd:doc_etag( + <<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>} + ), ?assertEqual(Etag, <<"\"1-efda11e34e88ebe31a2f83e84a0435b6\"">>). diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl index 606f4bbf4..1b54cd70e 100644 --- a/src/couch/test/eunit/couch_file_tests.erl +++ b/src/couch/test/eunit/couch_file_tests.erl @@ -18,7 +18,6 @@ -define(setup(F), {setup, fun setup/0, fun teardown/1, F}). -define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}). - setup() -> {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), Fd. @@ -34,7 +33,8 @@ open_close_test_() -> "Test for proper file open and close", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, [ should_return_enoent_if_missed(), should_ignore_invalid_flags_with_open(), @@ -49,8 +49,10 @@ should_return_enoent_if_missed() -> ?_assertEqual({error, enoent}, couch_file:open("not a real file")). should_ignore_invalid_flags_with_open() -> - ?_assertMatch({ok, _}, - couch_file:open(?tempfile(), [create, invalid_option])). + ?_assertMatch( + {ok, _}, + couch_file:open(?tempfile(), [create, invalid_option]) + ). 
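The `?HWAIR`/`?EURO`/`?CENT` hunk above shows that erlfmt hoists trailing same-line comments onto the line before the code. A minimal sketch of the post-format shape (module and function invented for illustration):

```
-module(erlfmt_comment_sketch).
-export([euro/0]).

% 3 byte utf8 encoding
-define(EURO, $\x{20ac}).

%% The comment that used to trail the -define now sits above it.
euro() ->
    <<?EURO/utf8>>.
```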
should_return_pid_on_file_open(Fd) -> ?_assert(is_pid(Fd)). @@ -63,13 +65,13 @@ should_close_file_properly() -> should_create_empty_new_files(Fd) -> ?_assertMatch({ok, 0}, couch_file:bytes(Fd)). - read_write_test_() -> { "Common file read/write tests", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, ?foreach([ fun should_increase_file_size_on_write/1, fun should_return_current_file_size_on_write/1, @@ -86,7 +88,6 @@ read_write_test_() -> } }. - should_increase_file_size_on_write(Fd) -> {ok, 0, _} = couch_file:append_term(Fd, foo), {ok, Size} = couch_file:bytes(Fd), @@ -111,7 +112,7 @@ should_return_term_as_binary_for_reading_binary(Fd) -> ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)). should_read_term_written_as_binary(Fd) -> - {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>), + {ok, Pos, _} = couch_file:append_binary(Fd, <<131, 100, 0, 3, 102, 111, 111>>), ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)). should_write_and_read_large_binary(Fd) -> @@ -139,8 +140,7 @@ should_not_read_beyond_eof(Fd) -> ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>), file:close(Io), unlink(Fd), - ExpectedError = {badmatch, {'EXIT', {bad_return_value, - {read_beyond_eof, Filepath}}}}, + ExpectedError = {badmatch, {'EXIT', {bad_return_value, {read_beyond_eof, Filepath}}}}, ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)). should_truncate(Fd) -> @@ -180,17 +180,16 @@ should_not_read_more_than_pread_limit(Fd) -> BigBin = list_to_binary(lists:duplicate(100000, 0)), {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin), unlink(Fd), - ExpectedError = {badmatch, {'EXIT', {bad_return_value, - {exceed_pread_limit, Filepath, 50000}}}}, + ExpectedError = {badmatch, {'EXIT', {bad_return_value, {exceed_pread_limit, Filepath, 50000}}}}, ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)). - header_test_() -> { "File header read/write tests", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, [ ?foreach([ fun should_write_and_read_atom_header/1, @@ -208,7 +207,6 @@ header_test_() -> } }. - should_write_and_read_atom_header(Fd) -> ok = couch_file:write_header(Fd, hello), ?_assertMatch({ok, hello}, couch_file:read_header(Fd)). @@ -243,7 +241,6 @@ should_save_headers_larger_than_block_size(Fd) -> couch_file:write_header(Fd, Header), {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}. - should_recover_header_marker_corruption() -> ?_assertMatch( ok, @@ -252,7 +249,8 @@ should_recover_header_marker_corruption() -> ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), file:pwrite(RawFd, HeaderPos, <<0>>), ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) + end + ) ). should_recover_header_size_corruption() -> @@ -264,7 +262,8 @@ should_recover_header_size_corruption() -> % +1 for 0x1 byte marker file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>), ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) + end + ) ). should_recover_header_md5sig_corruption() -> @@ -276,7 +275,8 @@ should_recover_header_md5sig_corruption() -> % +5 = +1 for 0x1 byte and +4 for term size. file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>), ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) + end + ) ). 
should_recover_header_data_corruption() -> @@ -288,10 +288,10 @@ should_recover_header_data_corruption() -> % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>), ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) + end + ) ). - check_header_recovery(CheckFun) -> Path = ?tempfile(), {ok, Fd} = couch_file:open(Path, [create, overwrite]), @@ -322,7 +322,6 @@ write_random_data(Fd, N) -> {ok, _, _} = couch_file:append_term(Fd, Term), write_random_data(Fd, N - 1). - delete_test_() -> { "File delete tests", @@ -350,34 +349,33 @@ delete_test_() -> [ fun(Cfg) -> {"enable_database_recovery = false, context = delete", - make_enable_recovery_test_case(Cfg, false, delete)} + make_enable_recovery_test_case(Cfg, false, delete)} end, fun(Cfg) -> {"enable_database_recovery = true, context = delete", - make_enable_recovery_test_case(Cfg, true, delete)} + make_enable_recovery_test_case(Cfg, true, delete)} end, fun(Cfg) -> {"enable_database_recovery = false, context = compaction", - make_enable_recovery_test_case(Cfg, false, compaction)} + make_enable_recovery_test_case(Cfg, false, compaction)} end, fun(Cfg) -> {"enable_database_recovery = true, context = compaction", - make_enable_recovery_test_case(Cfg, true, compaction)} + make_enable_recovery_test_case(Cfg, true, compaction)} end, fun(Cfg) -> {"delete_after_rename = true", - make_delete_after_rename_test_case(Cfg, true)} + make_delete_after_rename_test_case(Cfg, true)} end, fun(Cfg) -> {"delete_after_rename = false", - make_delete_after_rename_test_case(Cfg, false)} + make_delete_after_rename_test_case(Cfg, false)} end ] } } }. - make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) -> meck:expect(config, get_boolean, fun ("couchdb", "enable_database_recovery", _) -> EnableRecovery; @@ -388,10 +386,11 @@ make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) -> FileExistsAfter = filelib:is_regular(File), RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"), DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"), - {ExpectRenamedCount, ExpectDeletedCount} = if - EnableRecovery andalso Context =:= delete -> {1, 0}; - true -> {0, 1} - end, + {ExpectRenamedCount, ExpectDeletedCount} = + if + EnableRecovery andalso Context =:= delete -> {1, 0}; + true -> {0, 1} + end, [ ?_assert(FileExistsBefore), ?_assertNot(FileExistsAfter), @@ -408,14 +407,17 @@ make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) -> couch_file:delete(RootDir, File), FileExistsAfter = filelib:is_regular(File), RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])), - ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end, + ExpectRenamedCount = + if + DeleteAfterRename -> 0; + true -> 1 + end, [ ?_assert(FileExistsBefore), ?_assertNot(FileExistsAfter), ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)) ]. 
- nuke_dir_test_() -> { "Nuke directory tests", @@ -454,27 +456,22 @@ nuke_dir_test_() -> end, [ fun(Cfg) -> - {"enable_database_recovery = false", - make_rename_dir_test_case(Cfg, false)} + {"enable_database_recovery = false", make_rename_dir_test_case(Cfg, false)} end, fun(Cfg) -> - {"enable_database_recovery = true", - make_rename_dir_test_case(Cfg, true)} + {"enable_database_recovery = true", make_rename_dir_test_case(Cfg, true)} end, fun(Cfg) -> - {"delete_after_rename = true", - make_delete_dir_test_case(Cfg, true)} + {"delete_after_rename = true", make_delete_dir_test_case(Cfg, true)} end, fun(Cfg) -> - {"delete_after_rename = false", - make_delete_dir_test_case(Cfg, false)} + {"delete_after_rename = false", make_delete_dir_test_case(Cfg, false)} end ] } } }. - make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) -> meck:expect(config, get_boolean, fun ("couchdb", "enable_database_recovery", _) -> EnableRecovery; @@ -486,7 +483,11 @@ make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) -> DirExistsAfter = filelib:is_dir(ViewDir), Ext = filename:extension(ViewDir), RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext), - ExpectRenamedCount = if EnableRecovery -> 1; true -> 0 end, + ExpectRenamedCount = + if + EnableRecovery -> 1; + true -> 0 + end, [ ?_assert(DirExistsBefore), ?_assertNot(DirExistsAfter), @@ -505,7 +506,11 @@ make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) -> Ext = filename:extension(ViewDir), RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext), RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"), - ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end, + ExpectRenamedCount = + if + DeleteAfterRename -> 0; + true -> 1 + end, [ ?_assert(DirExistsBefore), ?_assertNot(DirExistsAfter), @@ -517,7 +522,6 @@ remove_dir(Dir) -> [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))], file:del_dir(Dir). - fsync_error_test_() -> { "Test fsync raises errors", @@ -535,12 +539,10 @@ fsync_error_test_() -> } }. - fsync_raises_errors() -> Fd = spawn(fun() -> fake_fsync_fd() end), ?assertError({fsync_error, eio}, couch_file:sync(Fd)). 
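Several hunks in couch_file_tests above show how erlfmt expands one-line `if` expressions: the binding moves onto its own line and the `if ... end` block is indented beneath it. A sketch under invented names:

```
-module(erlfmt_if_sketch).
-export([expected_renamed/1]).

%% One-line `X = if ... end` becomes a multi-line block, with the
%% bound expression indented under the `=`.
expected_renamed(EnableRecovery) ->
    ExpectRenamedCount =
        if
            EnableRecovery -> 1;
            true -> 0
        end,
    ExpectRenamedCount.
```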
- fake_fsync_fd() -> % Mocking gen_server did not go very % well so faking the couch_file pid diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl index 6fe2d5860..05707718b 100644 --- a/src/couch/test/eunit/couch_flags_config_tests.erl +++ b/src/couch/test/eunit/couch_flags_config_tests.erl @@ -19,9 +19,9 @@ couch_flags_config_test_() -> setup, fun setup/0, fun teardown/1, - [fun all_combinations_return_same_result/0] - ++ latest_overide_wins() - ++ [ + [fun all_combinations_return_same_result/0] ++ + latest_overide_wins() ++ + [ {"rules_are_sorted", fun rules_are_sorted/0} ] } @@ -29,50 +29,72 @@ couch_flags_config_test_() -> all_combinations_return_same_result() -> Config = [ - {"foo, bar||*", "true"}, - {"baz, qux||*", "false"}, - {"baz||shards/test*", "true"}, - {"baz||shards/blacklist*", "false"}, - {"bar||shards/test*", "false"}, - {"bar||shards/test/blacklist*", "true"} + {"foo, bar||*", "true"}, + {"baz, qux||*", "false"}, + {"baz||shards/test*", "true"}, + {"baz||shards/blacklist*", "false"}, + {"bar||shards/test*", "false"}, + {"bar||shards/test/blacklist*", "true"} ], Expected = [ - {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[bar, baz, foo]}}, - {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz, foo]}}, - {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, [bar, foo]}}, - {{<<"*">>},{<<"*">>, 1, [bar, foo]}} + {{<<"shards/test/blacklist*">>}, {<<"shards/test/blacklist*">>, 22, [bar, baz, foo]}}, + {{<<"shards/test*">>}, {<<"shards/test*">>, 12, [baz, foo]}}, + {{<<"shards/blacklist*">>}, {<<"shards/blacklist*">>, 17, [bar, foo]}}, + {{<<"*">>}, {<<"*">>, 1, [bar, foo]}} ], Combinations = couch_tests_combinatorics:permutations(Config), - lists:foreach(fun(Items) -> - ?assertEqual(Expected, couch_flags_config:data(Items)) - end, Combinations). + lists:foreach( + fun(Items) -> + ?assertEqual(Expected, couch_flags_config:data(Items)) + end, + Combinations + ). rules_are_sorted() -> Expected = [ - {{<<"shards/test/exact">>},{<<"shards/test/exact">>, 17, [baz,flag_bar,flag_foo]}}, - {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[flag_foo]}}, - {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz,flag_bar,flag_foo]}}, - {{<<"shards/exact">>},{<<"shards/exact">>, 12, [flag_bar,flag_foo]}}, - {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, []}}, - {{<<"*">>},{<<"*">>, 1, [flag_foo]}} + {{<<"shards/test/exact">>}, {<<"shards/test/exact">>, 17, [baz, flag_bar, flag_foo]}}, + {{<<"shards/test/blacklist*">>}, {<<"shards/test/blacklist*">>, 22, [flag_foo]}}, + {{<<"shards/test*">>}, {<<"shards/test*">>, 12, [baz, flag_bar, flag_foo]}}, + {{<<"shards/exact">>}, {<<"shards/exact">>, 12, [flag_bar, flag_foo]}}, + {{<<"shards/blacklist*">>}, {<<"shards/blacklist*">>, 17, []}}, + {{<<"*">>}, {<<"*">>, 1, [flag_foo]}} ], ?assertEqual(Expected, couch_flags_config:data(test_config())). 
latest_overide_wins() -> Cases = [ - {[ - {"flag||*", "false"}, {"flag||a*", "true"}, - {"flag||ab*", "true"}, {"flag||abc*", "true"} - ], true}, - {[ - {"flag||*", "true"}, {"flag||a*", "false"}, - {"flag||ab*", "true"}, {"flag||abc*", "false"} - ], false} + { + [ + {"flag||*", "false"}, + {"flag||a*", "true"}, + {"flag||ab*", "true"}, + {"flag||abc*", "true"} + ], + true + }, + { + [ + {"flag||*", "true"}, + {"flag||a*", "false"}, + {"flag||ab*", "true"}, + {"flag||abc*", "false"} + ], + false + } ], - [{test_id(Rules, Expected), - ?_assertEqual(Expected, lists:member(flag, - flags(hd(couch_flags_config:data(Rules)))))} - || {Rules, Expected} <- Cases]. + [ + { + test_id(Rules, Expected), + ?_assertEqual( + Expected, + lists:member( + flag, + flags(hd(couch_flags_config:data(Rules))) + ) + ) + } + || {Rules, Expected} <- Cases + ]. flags({{_Pattern}, {_Pattern, _Size, Flags}}) -> Flags. @@ -80,7 +102,6 @@ flags({{_Pattern}, {_Pattern, _Size, Flags}}) -> test_id(Items, ExpectedResult) -> lists:flatten(io_lib:format("~p -> ~p", [[P || {P, _} <- Items], ExpectedResult])). - test_config() -> [ {"flag_foo||*", "true"}, @@ -95,22 +116,32 @@ test_config() -> parse_flags_term_test_() -> LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1), - ExpectedError = {error, {"Cannot parse list of tags: ~n~p", - [{too_long, LongBinary}]}}, - ExpectedUnknownError = {error,{"Cannot parse list of tags: ~n~p", - [{invalid_flag,<<"dddddddd">>}]}}, - [ - {"empty binary", ?_assertEqual( - [], couch_flags_config:parse_flags_term(<<>>))}, - {"single flag", ?_assertEqual( - [fff], couch_flags_config:parse_flags_term(<<"fff">>))}, - {"sorted", ?_assertEqual( - [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>))}, - {"whitespace", ?_assertEqual( - [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>))}, - {"error", ?_assertEqual( - ExpectedError, couch_flags_config:parse_flags_term(LongBinary))}, - {"unknown_flag", ?_assertEqual( - ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>))} - ]. - + ExpectedError = {error, {"Cannot parse list of tags: ~n~p", [{too_long, LongBinary}]}}, + ExpectedUnknownError = + {error, {"Cannot parse list of tags: ~n~p", [{invalid_flag, <<"dddddddd">>}]}}, + [ + {"empty binary", + ?_assertEqual( + [], couch_flags_config:parse_flags_term(<<>>) + )}, + {"single flag", + ?_assertEqual( + [fff], couch_flags_config:parse_flags_term(<<"fff">>) + )}, + {"sorted", + ?_assertEqual( + [aaa, bbb, fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>) + )}, + {"whitespace", + ?_assertEqual( + [aaa, bbb, fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>) + )}, + {"error", + ?_assertEqual( + ExpectedError, couch_flags_config:parse_flags_term(LongBinary) + )}, + {"unknown_flag", + ?_assertEqual( + ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>) + )} + ]. diff --git a/src/couch/test/eunit/couch_flags_tests.erl b/src/couch/test/eunit/couch_flags_tests.erl index 32ec57b77..e3635e9f2 100644 --- a/src/couch/test/eunit/couch_flags_tests.erl +++ b/src/couch/test/eunit/couch_flags_tests.erl @@ -55,7 +55,9 @@ rules() -> setup() -> %% FIXME after we upgrade couch_epi - application:stop(couch_epi), % in case it's already running from other tests... + + % in case it's already running from other tests... + application:stop(couch_epi), application:unload(couch_epi), application:load(couch_epi), @@ -63,8 +65,7 @@ setup() -> meck:expect(config, get, 1, []), Ctx = test_util:start_couch([couch_epi]), - Ctx. 
- + Ctx. teardown(Ctx) -> test_util:stop_couch(Ctx), @@ -76,57 +77,65 @@ couch_flags_test_() -> { "test couch_flags", { - setup, fun setup/0, fun teardown/1, - enabled_flags_tests() - ++ is_enabled() -%% ++ match_performance() + setup, + fun setup/0, + fun teardown/1, + enabled_flags_tests() ++ + is_enabled() + %% ++ match_performance() } }. enabled_flags_tests() -> - - [{"enabled_flags_tests", [ - {"flags_default_rule", - ?_assertEqual( - [foo], couch_flags:enabled("something"))}, - {"flags_wildcard_rule", - ?_assertEqual( - [bar, baz, foo], - couch_flags:enabled("shards/test/something"))}, - {"flags_exact_rule", - ?_assertEqual( - [bar, baz, foo], - couch_flags:enabled("shards/test/exact"))}, - {"flags_blacklist_rule", - ?_assertEqual( - [], - couch_flags:enabled("shards/blacklist/4"))} - ]}]. + [ + {"enabled_flags_tests", [ + {"flags_default_rule", + ?_assertEqual( + [foo], couch_flags:enabled("something") + )}, + {"flags_wildcard_rule", + ?_assertEqual( + [bar, baz, foo], + couch_flags:enabled("shards/test/something") + )}, + {"flags_exact_rule", + ?_assertEqual( + [bar, baz, foo], + couch_flags:enabled("shards/test/exact") + )}, + {"flags_blacklist_rule", + ?_assertEqual( + [], + couch_flags:enabled("shards/blacklist/4") + )} + ]} + ]. is_enabled() -> - [{"is_enabled_tests", [ - {"flags_default_rule [enabled]", - ?_assert(couch_flags:is_enabled(foo, "something"))}, - {"flags_default_rule [disabled]", - ?_assertNot(couch_flags:is_enabled(baz, "something"))}, - {"flags_default_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "something"))}, - - {"flags_wildcard_rule [enabled]", - ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))}, - {"flags_wildcard_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))}, - - {"flags_exact_rule [overide_disbled]", - ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))}, - {"flags_exact_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))}, - - {"flags_blacklist_rule [overide_enabled]", - ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))}, - {"flags_blacklist_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))} - ]}]. + [ + {"is_enabled_tests", [ + {"flags_default_rule [enabled]", ?_assert(couch_flags:is_enabled(foo, "something"))}, + {"flags_default_rule [disabled]", + ?_assertNot(couch_flags:is_enabled(baz, "something"))}, + {"flags_default_rule [not_existent]", + ?_assertNot(couch_flags:is_enabled(non_existent, "something"))}, + + {"flags_wildcard_rule [enabled]", + ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))}, + {"flags_wildcard_rule [not_existent]", + ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))}, + + {"flags_exact_rule [overide_disbled]", + ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))}, + {"flags_exact_rule [not_existent]", + ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))}, + + {"flags_blacklist_rule [overide_enabled]", + ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))}, + {"flags_blacklist_rule [not_existent]", + ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))} + ]} + ]. %% match_performance() -> %% [{"match_performance", [ @@ -137,7 +146,6 @@ is_enabled() -> %% end) %% ]}]. 
- test_config() -> [ {"foo||/*", "true"}, diff --git a/src/couch/test/eunit/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl index 23c857d6c..368f7a059 100644 --- a/src/couch/test/eunit/couch_index_tests.erl +++ b/src/couch/test/eunit/couch_index_tests.erl @@ -36,10 +36,12 @@ couch_index_ioq_priority_test_() -> "Test ioq_priority for views", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun check_io_priority_for_updater/1, fun check_io_priority_for_compactor/1 @@ -48,11 +50,11 @@ couch_index_ioq_priority_test_() -> } }. - check_io_priority_for_updater(DbName) -> ?_test(begin {ok, IndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), + couch_mrview_index, DbName, <<"_design/foo">> + ), CouchIndexUpdaterPid = updater_pid(IndexerPid), tracer_record(CouchIndexUpdaterPid), @@ -63,15 +65,23 @@ check_io_priority_for_updater(DbName) -> [UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid), [UpdaterMapProcess] = wait_spawn_by_anonymous_fun( - UpdaterPid, '-start_update/4-fun-0-'), + UpdaterPid, '-start_update/4-fun-0-' + ), - ?assert(wait_set_io_priority( - UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>})), + ?assert( + wait_set_io_priority( + UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>} + ) + ), [UpdaterWriterProcess] = wait_spawn_by_anonymous_fun( - UpdaterPid, '-start_update/4-fun-1-'), - ?assert(wait_set_io_priority( - UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>})), + UpdaterPid, '-start_update/4-fun-1-' + ), + ?assert( + wait_set_io_priority( + UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>} + ) + ), ok end). @@ -79,7 +89,8 @@ check_io_priority_for_updater(DbName) -> check_io_priority_for_compactor(DbName) -> ?_test(begin {ok, IndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), + couch_mrview_index, DbName, <<"_design/foo">> + ), {ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid), tracer_record(CompactorPid), @@ -89,51 +100,65 @@ check_io_priority_for_compactor(DbName) -> wait_spawn_event_for_pid(CompactorPid), [CompactorProcess] = wait_spawn_by_anonymous_fun( - CompactorPid, '-handle_call/3-fun-0-'), - ?assert(wait_set_io_priority( - CompactorProcess, {view_compact, DbName, <<"_design/foo">>})), + CompactorPid, '-handle_call/3-fun-0-' + ), + ?assert( + wait_set_io_priority( + CompactorProcess, {view_compact, DbName, <<"_design/foo">>} + ) + ), ok end). create_docs(DbName) -> {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"value">>, 1} - - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>}, - {<<"value">>, 2} - - ]}), - Doc3 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc3">>}, - {<<"value">>, 3} - - ]}), + Doc1 = couch_doc:from_json_obj( + {[ + {<<"_id">>, <<"doc1">>}, + {<<"value">>, 1} + ]} + ), + Doc2 = couch_doc:from_json_obj( + {[ + {<<"_id">>, <<"doc2">>}, + {<<"value">>, 2} + ]} + ), + Doc3 = couch_doc:from_json_obj( + {[ + {<<"_id">>, <<"doc3">>}, + {<<"value">>, 3} + ]} + ), {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]), couch_db:close(Db). 
create_design_doc(DbName, DDName, ViewName) -> {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {ViewName, {[ - {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>} - ]}} - ]}} - ]}), + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DDName}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, + {[ + {ViewName, + {[ + {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>} + ]}} + ]}} + ]} + ), {ok, Rev} = couch_db:update_doc(Db, DDoc, []), couch_db:close(Db), Rev. wait_set_io_priority(Pid, IOPriority) -> - test_util:wait_value(fun() -> - does_process_set_io_priority(Pid, IOPriority) - end, true). + test_util:wait_value( + fun() -> + does_process_set_io_priority(Pid, IOPriority) + end, + true + ). does_process_set_io_priority(Pid, IOPriority) -> PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}), @@ -143,36 +168,47 @@ wait_events(MatchSpec) -> test_util:wait_other_value(fun() -> select(MatchSpec) end, []). find_spawned_by_anonymous_fun(ParentPid, Name) -> - AnonymousFuns = select(ets:fun2ms(fun - ({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]}) - when is_function(Fun) andalso PPid =:= ParentPid -> {Pid, Fun} - end)), - lists:filtermap(fun({Pid, Fun}) -> - case erlang:fun_info(Fun, name) of - {name, Name} -> {true, Pid}; - _ -> false - end - end, AnonymousFuns). + AnonymousFuns = select( + ets:fun2ms(fun({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]}) when + is_function(Fun) andalso PPid =:= ParentPid + -> + {Pid, Fun} + end) + ), + lists:filtermap( + fun({Pid, Fun}) -> + case erlang:fun_info(Fun, name) of + {name, Name} -> {true, Pid}; + _ -> false + end + end, + AnonymousFuns + ). find_calls_to_fun(Pid, {Module, Function, Arity}) -> - select(ets:fun2ms(fun - ({call, P, _TS, _Name, _Dict, [{M, F, Args}]}) - when length(Args) =:= Arity - andalso M =:= Module - andalso F =:= Function - andalso P =:= Pid - -> Args - end)). + select( + ets:fun2ms(fun({call, P, _TS, _Name, _Dict, [{M, F, Args}]}) when + length(Args) =:= Arity andalso + M =:= Module andalso + F =:= Function andalso + P =:= Pid + -> + Args + end) + ). wait_spawn_event_for_pid(ParentPid) -> - wait_events(ets:fun2ms(fun - ({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid - end)). + wait_events( + ets:fun2ms(fun({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid end) + ). wait_spawn_by_anonymous_fun(ParentPid, Name) -> - test_util:wait_other_value(fun() -> - find_spawned_by_anonymous_fun(ParentPid, Name) - end, []). + test_util:wait_other_value( + fun() -> + find_spawned_by_anonymous_fun(ParentPid, Name) + end, + [] + ). updater_pid(IndexerPid) -> {links, Links} = process_info(IndexerPid, links), @@ -180,20 +216,25 @@ updater_pid(IndexerPid) -> Pid. select_process_by_name_prefix(Pids, Name) -> - lists:filter(fun(Pid) -> - Key = couch_debug:process_name(Pid), - string:str(Key, Name) =:= 1 - end, Pids). + lists:filter( + fun(Pid) -> + Key = couch_debug:process_name(Pid), + string:str(Key, Name) =:= 1 + end, + Pids + ). select(MatchSpec) -> - lists:filtermap(fun(Event) -> - case ets:test_ms(Event, MatchSpec) of - {ok, false} -> false; - {ok, Result} -> {true, Result}; - _ -> false - end - end, tracer_events()). - + lists:filtermap( + fun(Event) -> + case ets:test_ms(Event, MatchSpec) of + {ok, false} -> false; + {ok, Result} -> {true, Result}; + _ -> false + end + end, + tracer_events() + ). 
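The create_design_doc/3 hunk above shows erlfmt's treatment of deeply nested EJSON terms: a tuple's key stays on its line while the nested `{[ ... ]}` value wraps onto the next line at one extra indent level. A reduced sketch (document contents invented):

```
-module(erlfmt_nesting_sketch).
-export([ddoc/0]).

%% Nested {[...]} proplists laid out the way erlfmt emits them: the
%% key on one line, the nested structure starting on the next.
ddoc() ->
    {[
        {<<"_id">>, <<"_design/example">>},
        {<<"views">>,
            {[
                {<<"by_value">>,
                    {[
                        {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
                    ]}}
            ]}}
    ]}.
```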
%% ======================== %% Tracer related functions @@ -225,7 +266,7 @@ tracer_collector(Msg, Seq) -> normalize_trace_msg(TraceMsg) -> case tuple_to_list(TraceMsg) of [trace_ts, Pid, Type | Info] -> - {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info), + {TraceInfo, [Timestamp]} = lists:split(length(Info) - 1, Info), {Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo}; [trace, Pid, Type | TraceInfo] -> {Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo} diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl index c68d60125..1079678da 100644 --- a/src/couch/test/eunit/couch_js_tests.erl +++ b/src/couch/test/eunit/couch_js_tests.erl @@ -13,7 +13,6 @@ -module(couch_js_tests). -include_lib("eunit/include/eunit.hrl"). - couch_js_test_() -> { "Test couchjs", @@ -32,104 +31,131 @@ couch_js_test_() -> } }. - should_create_sandbox() -> % Try and detect whether we can see out of the % sandbox or not. Src = << - "function(doc) {\n" - " try {\n" - " emit(false, typeof(Couch.compile_function));\n" - " } catch (e) {\n" - " emit(true, e.message);\n" - " }\n" - "}\n" + "function(doc) {\n" + " try {\n" + " emit(false, typeof(Couch.compile_function));\n" + " } catch (e) {\n" + " emit(true, e.message);\n" + " }\n" + "}\n" >>, Proc = couch_query_servers:get_os_process(<<"javascript">>), true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]), Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]), ?assertEqual([[[true, <<"Couch is not defined">>]]], Result). - should_roundtrip_utf8() -> % Try round tripping UTF-8 both directions through % couchjs. These tests use hex encoded values of % Ä (C384) and Ü (C39C) so as to avoid odd editor/Erlang encoding % strangeness. Src = << - "function(doc) {\n" - " emit(doc.value, \"", 16#C3, 16#9C, "\");\n" - "}\n" + "function(doc) {\n" + " emit(doc.value, \"", + 16#C3, + 16#9C, + "\");\n" + "}\n" >>, Proc = couch_query_servers:get_os_process(<<"javascript">>), true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]), - Doc = {[ - {<<"value">>, <<16#C3, 16#84>>} - ]}, + Doc = + {[ + {<<"value">>, <<16#C3, 16#84>>} + ]}, Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]), ?assertEqual([[[<<16#C3, 16#84>>, <<16#C3, 16#9C>>]]], Result). - should_roundtrip_modified_utf8() -> % Mimicing the test case from the mailing list Src = << - "function(doc) {\n" - " emit(doc.value.toLowerCase(), \"", 16#C3, 16#9C, "\");\n" - "}\n" + "function(doc) {\n" + " emit(doc.value.toLowerCase(), \"", + 16#C3, + 16#9C, + "\");\n" + "}\n" >>, Proc = couch_query_servers:get_os_process(<<"javascript">>), true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]), - Doc = {[ - {<<"value">>, <<16#C3, 16#84>>} - ]}, + Doc = + {[ + {<<"value">>, <<16#C3, 16#84>>} + ]}, Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]), ?assertEqual([[[<<16#C3, 16#A4>>, <<16#C3, 16#9C>>]]], Result). 
- should_replace_broken_utf16() -> % This test reverse the surrogate pair of % the Boom emoji U+1F4A5 Src = << - "function(doc) {\n" - " emit(doc.value.split(\"\").reverse().join(\"\"), 1);\n" - "}\n" + "function(doc) {\n" + " emit(doc.value.split(\"\").reverse().join(\"\"), 1);\n" + "}\n" >>, Proc = couch_query_servers:get_os_process(<<"javascript">>), true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]), - Doc = {[ - {<<"value">>, list_to_binary(xmerl_ucs:to_utf8([16#1F4A5]))} - ]}, + Doc = + {[ + {<<"value">>, list_to_binary(xmerl_ucs:to_utf8([16#1F4A5]))} + ]}, Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]), % Invalid UTF-8 gets replaced with the 16#FFFD replacement % marker Markers = list_to_binary(xmerl_ucs:to_utf8([16#FFFD, 16#FFFD])), ?assertEqual([[[Markers, 1]]], Result). - should_allow_js_string_mutations() -> % This binary corresponds to this string: мама мыла раму % Which I'm told translates to: "mom was washing the frame" MomWashedTheFrame = << - 16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0, 16#20, - 16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0, 16#20, - 16#D1, 16#80, 16#D0, 16#B0, 16#D0, 16#BC, 16#D1, 16#83 + 16#D0, + 16#BC, + 16#D0, + 16#B0, + 16#D0, + 16#BC, + 16#D0, + 16#B0, + 16#20, + 16#D0, + 16#BC, + 16#D1, + 16#8B, + 16#D0, + 16#BB, + 16#D0, + 16#B0, + 16#20, + 16#D1, + 16#80, + 16#D0, + 16#B0, + 16#D0, + 16#BC, + 16#D1, + 16#83 >>, Mom = <<16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0>>, Washed = <<16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0>>, Src1 = << - "function(doc) {\n" - " emit(\"length\", doc.value.length);\n" - "}\n" + "function(doc) {\n" + " emit(\"length\", doc.value.length);\n" + "}\n" >>, Src2 = << - "function(doc) {\n" - " emit(\"substring\", doc.value.substring(5, 9));\n" - "}\n" + "function(doc) {\n" + " emit(\"substring\", doc.value.substring(5, 9));\n" + "}\n" >>, Src3 = << - "function(doc) {\n" - " emit(\"slice\", doc.value.slice(0, 4));\n" - "}\n" + "function(doc) {\n" + " emit(\"slice\", doc.value.slice(0, 4));\n" + "}\n" >>, Proc = couch_query_servers:get_os_process(<<"javascript">>), true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src1]), @@ -144,29 +170,30 @@ should_allow_js_string_mutations() -> ], ?assertEqual(Expect, Result). - should_exit_on_oom() -> Src = << - "var state = [];\n" - "function(doc) {\n" - " var val = \"0123456789ABCDEF\";\n" - " for(var i = 0; i < 665535; i++) {\n" - " state.push([val, val]);\n" - " emit(null, null);\n" - " }\n" - "}\n" + "var state = [];\n" + "function(doc) {\n" + " var val = \"0123456789ABCDEF\";\n" + " for(var i = 0; i < 665535; i++) {\n" + " state.push([val, val]);\n" + " emit(null, null);\n" + " }\n" + "}\n" >>, Proc = couch_query_servers:get_os_process(<<"javascript">>), true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]), trigger_oom(Proc). 
trigger_oom(Proc) -> - Status = try - couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]), - continue - catch throw:{os_process_error, {exit_status, 1}} -> - done - end, + Status = + try + couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]), + continue + catch + throw:{os_process_error, {exit_status, 1}} -> + done + end, case Status of continue -> trigger_oom(Proc); done -> ok diff --git a/src/couch/test/eunit/couch_key_tree_prop_tests.erl b/src/couch/test/eunit/couch_key_tree_prop_tests.erl index 9c09aace5..d6ed26553 100644 --- a/src/couch/test/eunit/couch_key_tree_prop_tests.erl +++ b/src/couch/test/eunit/couch_key_tree_prop_tests.erl @@ -12,49 +12,53 @@ -module(couch_key_tree_prop_tests). - -ifdef(WITH_PROPER). -include_lib("couch/include/couch_eunit_proper.hrl"). - --define(SIZE_REDUCTION, 3). % How much to reduce size with tree depth. --define(MAX_BRANCHES, 4). % Maximum number of branches. +% How much to reduce size with tree depth. +-define(SIZE_REDUCTION, 3). +% Maximum number of branches. +-define(MAX_BRANCHES, 4). -define(RAND_SIZE, 1 bsl 64). - property_test_() -> ?EUNIT_QUICKCHECK(60). - % % Properties % - % Merge random paths from a revtree into itself. Check that no revisions have % been lost in the process and that result is one of the 3 expected values. % prop_revtree_merge_with_subset_of_own_nodes() -> - ?FORALL(Revs, g_revs(), - ?FORALL({RevTree, Branch}, {g_revtree(Revs), g_revtree(Revs, 1)}, - ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [], + ?FORALL( + Revs, + g_revs(), + ?FORALL( + {RevTree, Branch}, + {g_revtree(Revs), g_revtree(Revs, 1)}, + ?IMPLIES( + length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [], begin {Merged, Result} = couch_key_tree:merge(RevTree, hd(Branch)), - lists:member(Result, [new_leaf, new_branch, internal_node]) - andalso same_keys(RevTree ++ Branch, Merged) - andalso valid_revtree(Merged) + lists:member(Result, [new_leaf, new_branch, internal_node]) andalso + same_keys(RevTree ++ Branch, Merged) andalso + valid_revtree(Merged) end ) ) ). - % Merge random trees into revtree. % prop_revtree_merge_random_nodes() -> - ?FORALL({RevTree, Branch}, {g_revtree(), g_revtree([], 1)}, - ?IMPLIES(length(Branch) > 0, + ?FORALL( + {RevTree, Branch}, + {g_revtree(), g_revtree([], 1)}, + ?IMPLIES( + length(Branch) > 0, begin {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)), valid_revtree(Merged) @@ -62,33 +66,35 @@ prop_revtree_merge_random_nodes() -> ) ). - - % Merge mix or random and existing revtree paths into revtree % prop_revtree_merge_some_existing_some_new() -> - ?FORALL(RevTree, g_revtree(), - ?FORALL(Branch, + ?FORALL( + RevTree, + g_revtree(), + ?FORALL( + Branch, begin KeyList = keylist(RevTree), Half = lists:sublist(KeyList, length(KeyList) div 2), g_revtree(Half, 1) end, - ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [], - begin - {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)), - valid_revtree(Merged) - end + ?IMPLIES( + length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [], + begin + {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)), + valid_revtree(Merged) + end ) ) ). - - % Stem deeper than the current max level. 
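The trigger_oom/1 rewrite above demonstrates the try/catch layout: the matched expression drops below the `=`, and `try`, `catch`, and `end` each get their own line. A minimal sketch with invented names:

```
-module(erlfmt_try_sketch).
-export([attempt/1]).

%% `Status = try ... catch ... end` reflowed in the erlfmt style seen
%% in trigger_oom/1: the whole try block is indented under the binding.
attempt(Fun) ->
    Status =
        try
            Fun(),
            continue
        catch
            throw:stop ->
                done
        end,
    Status.
```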
Expect no changes to the revtree % prop_no_change_stemming_deeper_than_current_depth() -> - ?FORALL(RevTree, g_revtree(), + ?FORALL( + RevTree, + g_revtree(), begin StemDepth = depth(RevTree) + 1, Stemmed = couch_key_tree:stem(RevTree, StemDepth), @@ -98,12 +104,13 @@ prop_no_change_stemming_deeper_than_current_depth() -> end ). - % Stem at a random small depth, make sure that resulting tree has % unique revisions and the same number or less revisions than input % prop_stemming_results_in_same_or_less_total_revs() -> - ?FORALL({RevTree, StemDepth}, {g_revtree(), choose(1, 20)}, + ?FORALL( + {RevTree, StemDepth}, + {g_revtree(), choose(1, 20)}, begin Stemmed = couch_key_tree:stem(RevTree, StemDepth), OldRealDepth = real_depth(RevTree), @@ -111,89 +118,95 @@ prop_stemming_results_in_same_or_less_total_revs() -> UniqueStemmedKeys = lists:usort(StemmedKeys), UniqueInputKeys = lists:usort(keylist(RevTree)), NewRealDepth = real_depth(Stemmed), - length(StemmedKeys) == length(UniqueStemmedKeys) - andalso length(UniqueStemmedKeys) =< length(UniqueInputKeys) - andalso OldRealDepth >= NewRealDepth + length(StemmedKeys) == length(UniqueStemmedKeys) andalso + length(UniqueStemmedKeys) =< length(UniqueInputKeys) andalso + OldRealDepth >= NewRealDepth end ). - % Generate a longer path (revtree with no branches) then stem it. % Always expect it to shrink to stemmed depth. prop_stem_path_expect_size_to_get_smaller() -> - ?FORALL({RevTree, StemDepth}, + ?FORALL( + {RevTree, StemDepth}, { ?SIZED(Size, g_revtree(Size * 10, [], 1)), - choose(1,3) + choose(1, 3) }, - ?IMPLIES(real_depth(RevTree) > 3, + ?IMPLIES( + real_depth(RevTree) > 3, begin Stemmed = couch_key_tree:stem(RevTree, StemDepth), StemmedKeys = lists:usort(keylist(Stemmed)), InputKeys = lists:usort(keylist(RevTree)), - length(InputKeys) > length(StemmedKeys) - andalso real_depth(Stemmed) == StemDepth + length(InputKeys) > length(StemmedKeys) andalso + real_depth(Stemmed) == StemDepth end ) ). - % After stemming all leaves are still present prop_after_stemming_all_leaves_are_present() -> - ?FORALL({RevTree, StemDepth}, - {g_revtree(), choose(1,20)}, + ?FORALL( + {RevTree, StemDepth}, + {g_revtree(), choose(1, 20)}, begin OldRealDepth = real_depth(RevTree), OldLeaves = leaves(RevTree), Stemmed = couch_key_tree:stem(RevTree, StemDepth), NewRealDepth = real_depth(Stemmed), NewLeaves = leaves(Stemmed), - valid_revtree(Stemmed) - andalso OldRealDepth >= NewRealDepth - andalso OldLeaves == NewLeaves - + valid_revtree(Stemmed) andalso + OldRealDepth >= NewRealDepth andalso + OldLeaves == NewLeaves end ). - % After stemming paths to root didn't get longer prop_after_stemming_paths_are_shorter() -> - ?FORALL({StemDepth, RevTree}, {choose(2,10), g_revtree()}, + ?FORALL( + {StemDepth, RevTree}, + {choose(2, 10), g_revtree()}, begin OldPaths = paths(RevTree), Stemmed = couch_key_tree:stem(RevTree, StemDepth), NewPaths = paths(Stemmed), - GrowingPaths = orddict:fold(fun(Rev, Path, Acc) -> - OldPath = orddict:fetch(Rev, OldPaths), - case length(Path) > length(OldPath) of - true -> - [{Rev, Path, OldPath}| Acc]; - false -> - Acc - end - end, [], NewPaths), + GrowingPaths = orddict:fold( + fun(Rev, Path, Acc) -> + OldPath = orddict:fetch(Rev, OldPaths), + case length(Path) > length(OldPath) of + true -> + [{Rev, Path, OldPath} | Acc]; + false -> + Acc + end + end, + [], + NewPaths + ), valid_revtree(Stemmed) andalso GrowingPaths == [] end ). 
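The property-test hunks above also show how erlfmt reflows long boolean chains: `andalso` stays at the end of each line and the continuation lines are indented. A tiny invented example of the same shape:

```
-module(erlfmt_andalso_sketch).
-export([in_range/3]).

%% A long andalso chain with the operator kept at end-of-line, as in
%% prop_stemming_results_in_same_or_less_total_revs/0 above.
in_range(Low, High, X) ->
    is_integer(X) andalso
        Low =< X andalso
        X =< High.
```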
- % Check leaf count prop_leaf_count() -> - ?FORALL(RevTree, g_revtree(), + ?FORALL( + RevTree, + g_revtree(), length(leaves(RevTree)) == couch_key_tree:count_leafs(RevTree) ). - % Check get leafs prop_get_leafs() -> - ?FORALL(RevTree, g_revtree(), + ?FORALL( + RevTree, + g_revtree(), begin LeafsFull = couch_key_tree:get_all_leafs(RevTree), lists:usort([Rev || {_V, {_D, [Rev | _]}} <- LeafsFull]) == leaves(RevTree) end ). - % % Generators % @@ -205,39 +218,40 @@ prop_get_leafs() -> g_revtree() -> ?SIZED(Size, g_revtree(Size)). - g_revtree(Size) when is_integer(Size) -> g_revtree(Size, [], ?MAX_BRANCHES); g_revtree(Revs) when is_list(Revs) -> ?SIZED(Size, g_revtree(Size, Revs, ?MAX_BRANCHES)). - g_revtree(Size, Revs) when is_integer(Size), is_list(Revs) -> g_revtree(Size, Revs, ?MAX_BRANCHES); g_revtree(Revs, MaxBranches) when is_list(Revs), is_integer(MaxBranches) -> ?SIZED(Size, g_revtree(Size, Revs, MaxBranches)). - g_revtree(0, _Revs, _MaxBranches) -> []; g_revtree(Size, ERevs, MaxBranches) -> - ?LET({Depth, Revs}, {g_stem_depth(Size), g_revs(Size, ERevs)}, - [{Depth, g_treenode(Size, Revs, MaxBranches)}] + ?LET( + {Depth, Revs}, + {g_stem_depth(Size), g_revs(Size, ERevs)}, + [{Depth, g_treenode(Size, Revs, MaxBranches)}] ). - % Generate a tree node and then recursively generate its children. % g_treenode(0, Revs, _) -> {elements(Revs), x, []}; g_treenode(Size, Revs, MaxBranches) -> - ?LAZY(?LET(N, choose(0, MaxBranches), - begin - [Rev | ChildRevs] = Revs, - {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)} - end - )). - + ?LAZY( + ?LET( + N, + choose(0, MaxBranches), + begin + [Rev | ChildRevs] = Revs, + {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)} + end + ) + ). % Generate a list of child nodes. Depending on how many children there are % the pre-generarated revision list is split into that many sublists. @@ -256,82 +270,70 @@ g_nodes(Size, ChildCount, Revs, MaxBranches) -> ordered_nodes(ChildNodes) ). - % Generate each subtree's stem depth % - g_stem_depth(Size) -> - choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2). - + choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2). % Uses the shuffle/1 function to shuffle the input list. Unshuffled list is % used as the shrink value. % -g_shuffle([]) -> []; +g_shuffle([]) -> + []; g_shuffle(L) when is_list(L) -> - ?LET(X, elements(L), [X | g_shuffle(lists:delete(X,L))]). - + ?LET(X, elements(L), [X | g_shuffle(lists:delete(X, L))]). % Wrapper to make a list shuffling generator that doesn't shrink % g_shuffle_noshrink(L) when is_list(L) -> proper_types:noshrink(g_shuffle(L)). - % Generate shuffled sublists up to N items long from a list. % g_shuffled_sublists(L, N) -> ?LET(Shuffled, g_shuffle_noshrink(L), lists:sublist(Shuffled, N)). - % Generate revision lists. % g_revs() -> ?SIZED(Size, g_revs(Size)). - g_revs(Size) when is_integer(Size) -> g_revs(Size, []). - g_revs(Size, Existing) when is_integer(Size), is_list(Existing) -> Expected = keys_needed(Size, ?SIZE_REDUCTION, ?MAX_BRANCHES), Revs = revs(Expected, Existing), case length(Revs) > Expected of - true -> % have extra, try various sublists + % have extra, try various sublists + true -> g_shuffled_sublists(Revs, Expected); false -> proper_types:return(Revs) end. - % % Helper functions % - valid_revtree(RevTree) -> repeating_revs(levels(RevTree)) == [] andalso children_sorted(RevTree). - same_keys(RevTree1, RevTree2) -> Keys1 = lists:usort(keylist(RevTree1)), Keys2 = lists:usort(keylist(RevTree2)), Keys1 == Keys2. 
- all(L) -> lists:all(fun(E) -> E end, L). - % Generate list of relateively unique large random numbers rand_list(N) when N =< 0 -> []; rand_list(N) -> [rand:uniform(?RAND_SIZE) || _ <- lists:seq(1, N)]. - % Generate a list of revisions to be used as key in revision trees. Expected % must the number of maximum expected nodes in a revision tree. Existing is an % optional list revisions which must be included in the result. The output list @@ -342,7 +344,6 @@ revs(Expected, Existing) when is_integer(Expected), is_list(Existing) -> Need = Expected - length(Existing), lists:usort(lists:append(Existing, rand_list(Need))). - % Get the list of all the keys in a revision tree. The input can also be a % an individual tree (tagged with the depth to virtual root) or a node. % Yes, this is not tail recursive but the idea is to keep it simple. @@ -354,7 +355,6 @@ keylist({K, _V, Nodes}) -> keylist(Nodes) -> lists:append([keylist(Node) || Node <- Nodes]). - % Get the list of leaves from a revision tree. leaves([]) -> []; @@ -367,7 +367,6 @@ leaves({_K, _V, Nodes}) -> leaves(Nodes) -> lists:usort(lists:append([leaves(N) || N <- Nodes])). - % Get paths from leaf to root. Result is an orddict of [{LeafRev, [Rev]}] % paths([]) -> @@ -382,14 +381,20 @@ paths({K, _V, Nodes}) -> CombinedDict = paths_merge_dicts([paths(N) || N <- Nodes]), orddict:map(fun(_LeafKey, Path) -> Path ++ [K] end, CombinedDict). - paths_merge_dicts(Dicts) -> - lists:foldl(fun(D, AccD) -> - orddict:merge(fun(K, V1, V2) -> - throw({found_duplicates, K, V1, V2}) - end, D, AccD) - end, orddict:new(), Dicts). - + lists:foldl( + fun(D, AccD) -> + orddict:merge( + fun(K, V1, V2) -> + throw({found_duplicates, K, V1, V2}) + end, + D, + AccD + ) + end, + orddict:new(), + Dicts + ). % Get lists of all the keys at each depth level. Result is an orddict that % looks like [{depth, [key]}]. The depth used here is the "virtual" depth as @@ -400,32 +405,36 @@ levels([]) -> levels(RevTree) when is_list(RevTree) -> lists:foldl(fun(T, Dict) -> levels(T, Dict) end, orddict:new(), RevTree). - levels({Depth, Node}, Dict) when is_tuple(Node) -> levels(Node, Depth, Dict). - levels({K, _V, Nodes}, Depth, Dict) -> - Dict1 = case orddict:is_key(Depth, Dict) of - true -> orddict:append(Depth, K, Dict); - false -> orddict:store(Depth, [K], Dict) - end, + Dict1 = + case orddict:is_key(Depth, Dict) of + true -> orddict:append(Depth, K, Dict); + false -> orddict:store(Depth, [K], Dict) + end, levels(Nodes, Depth + 1, Dict1); levels(Nodes, Depth, Dict) -> - lists:foldl(fun(Node, AccDict) -> - levels(Node, Depth, AccDict) - end, Dict, Nodes). - + lists:foldl( + fun(Node, AccDict) -> + levels(Node, Depth, AccDict) + end, + Dict, + Nodes + ). % Using the output of leaves/1 as input return any repeating revisions if % there are any at a particular level. Levels which have not revisions are % not returned. % repeating_revs(Dict) -> - orddict:filter(fun(_Depth, Revs) -> - length(lists:usort(Revs)) =/= length(Revs) - end, Dict). - + orddict:filter( + fun(_Depth, Revs) -> + length(lists:usort(Revs)) =/= length(Revs) + end, + Dict + ). % Check that children of all nodes are sorted children_sorted([]) -> @@ -437,7 +446,6 @@ children_sorted({_D, Node}) when is_tuple(Node) -> children_sorted({_K, _V, Nodes}) -> children_sorted(Nodes). - % Get the maximum depth of a revtree. The depth is "virtual" as it takes into % account the distance to the now stemmed root node as indicated by the top % level subtrees. 
@@ -449,7 +457,6 @@ depth(RevTree) when is_list(RevTree) ->
 depth({Depth, Node}) when is_tuple(Node) ->
     depth(Node, Depth - 1).
-
 depth({_K, _V, Nodes}, Depth) ->
     depth(Nodes, Depth + 1);
 depth([], Depth) ->
@@ -457,7 +464,6 @@ depth([], Depth) ->
 depth(Nodes, Depth) ->
     lists:max([depth(Node, Depth) || Node <- Nodes]).
-
% Get the "real" tree depth, not the virtual one. As revtrees gets stemmed they
% will keep their virtual depth but the actual number of nodes in the tree
% could be reduced.
@@ -467,8 +473,8 @@ real_depth([]) ->
 real_depth(RevTree) when is_list(RevTree) ->
     lists:max([real_depth(T) || T <- RevTree]);
 real_depth({_Depth, Node}) when is_tuple(Node) ->
-    depth(Node, 0). % Note from here on use the depth/3 function
-
+    % Note from here on use the depth/3 function
+    depth(Node, 0).
% Return an ordered list of revtree nodes. When sorting, only immediate keys
% (revisions) are looked at and comparison doesn't descend into the tree.
@@ -476,7 +482,6 @@ real_depth({_Depth, Node}) when is_tuple(Node) ->
 ordered_nodes(Nodes) ->
     lists:sort(fun({K1, _, _}, {K2, _, _}) -> K1 =< K2 end, Nodes).
-
% Calculate a maximum number of rev tree nodes needed for a tree of a given
% height and branchiness. Height is derived from Size and LevelReductionFactor,
% that is how big the sample should be and how quickly the size parameter would
@@ -487,20 +492,17 @@ keys_needed(0, _, _) ->
 keys_needed(Size, LevelReductionFactor, 1) ->
     expected_height(Size, LevelReductionFactor);
 keys_needed(Size, LevelReductionFactor, Branches) ->
-    Height = expected_height(Size, LevelReductionFactor),
+    Height = expected_height(Size, LevelReductionFactor),
     trunc(math:pow(Branches, Height + 1)) + 1.
-
% Calculate expected tree height for a given sample size and branchiness.
% At each step the size is divided by the reduction factor.
 expected_height(Size, LevelReductionFactor) ->
     trunc(log(LevelReductionFactor, Size)) + 1.
-
 log(B, X) ->
     math:log(X) / math:log(B).
-
% Distribute items in a list into roughly equal chunks of a given size.
%
 distribute(_ChunkSize, []) ->
@@ -511,7 +513,6 @@ distribute(ChunkSize, L) ->
     {L1, L2} = lists:split(ChunkSize, L),
     [L1 | distribute(ChunkSize, L2)].
-
% Split a single (parent) revision list into chunks (sub-lists), one for each
% child. Also, for safety, double check that at this point in the process the
% list of revisions is sufficiently large. If it isn't, something went wrong and
diff --git a/src/couch/test/eunit/couch_key_tree_tests.erl b/src/couch/test/eunit/couch_key_tree_tests.erl index 5d9cc8372..f571139c9 100644 --- a/src/couch/test/eunit/couch_key_tree_tests.erl +++ b/src/couch/test/eunit/couch_key_tree_tests.erl @@ -16,8 +16,7 @@
 -define(DEPTH, 10).
-
-key_tree_merge_test_()->
+key_tree_merge_test_() ->
 {
 "Key tree merge",
 [
@@ -40,16 +39,16 @@ key_tree_merge_test_()->
 ]
 }.
-key_tree_missing_leaves_test_()->
+key_tree_missing_leaves_test_() ->
 {
-    "Missing tree leaves",
-    [
-        should_not_find_missing_leaves(),
-        should_find_missing_leaves()
-    ]
+        "Missing tree leaves",
+        [
+            should_not_find_missing_leaves(),
+            should_find_missing_leaves()
+        ]
 }.
-key_tree_remove_leaves_test_()->
+key_tree_remove_leaves_test_() ->
 {
 "Remove tree leaves",
 [
@@ -62,7 +61,7 @@ key_tree_remove_leaves_test_()->
 ]
 }.
-key_tree_get_leaves_test_()->
+key_tree_get_leaves_test_() ->
 {
 "Leaves retrieving",
 [
@@ -80,7 +79,7 @@ key_tree_get_leaves_test_()->
 ]
 }.
-key_tree_leaf_counting_test_()->
+key_tree_leaf_counting_test_() ->
 {
 "Leaf counting",
 [
@@ -91,7 +90,7 @@ key_tree_leaf_counting_test_()->
 ]
 }.
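Throughout the fixtures in this test module, a revision tree is a list of `{StartDepth, {Key, Value, ChildNodes}}` subtrees, the same shape couch_key_tree operates on. A small shell sketch of the leaf counting these groups exercise:

```
1> Tree = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}].
2> couch_key_tree:count_leafs(Tree).
2
```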
-key_tree_stemming_test_()-> +key_tree_stemming_test_() -> { "Stemming", [ @@ -101,48 +100,71 @@ key_tree_stemming_test_()-> ] }. - -should_merge_with_empty_tree()-> - One = {1, {"1","foo",[]}}, - ?_assertEqual({[One], new_leaf}, - merge_and_stem([], One)). - -should_merge_reflexive()-> - One = {1, {"1","foo",[]}}, - ?_assertEqual({[One], internal_node}, - merge_and_stem([One], One)). - -should_merge_prefix_of_a_tree_with_tree()-> - One = {1, {"1","foo",[]}}, - TwoSibs = [{1, {"1","foo",[]}}, - {1, {"2","foo",[]}}], - ?_assertEqual({TwoSibs, internal_node}, - merge_and_stem(TwoSibs, One)). - -should_produce_conflict_on_merge_with_unrelated_branch()-> - TwoSibs = [{1, {"1","foo",[]}}, - {1, {"2","foo",[]}}], - Three = {1, {"3","foo",[]}}, - ThreeSibs = [{1, {"1","foo",[]}}, - {1, {"2","foo",[]}}, - {1, {"3","foo",[]}}], - ?_assertEqual({ThreeSibs, new_branch}, - merge_and_stem(TwoSibs, Three)). - -should_merge_reflexive_for_child_nodes()-> - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], internal_node}, - merge_and_stem([TwoChild], TwoChild)). - -should_merge_tree_to_itself()-> - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}, +should_merge_with_empty_tree() -> + One = {1, {"1", "foo", []}}, + ?_assertEqual( + {[One], new_leaf}, + merge_and_stem([], One) + ). + +should_merge_reflexive() -> + One = {1, {"1", "foo", []}}, + ?_assertEqual( + {[One], internal_node}, + merge_and_stem([One], One) + ). + +should_merge_prefix_of_a_tree_with_tree() -> + One = {1, {"1", "foo", []}}, + TwoSibs = [ + {1, {"1", "foo", []}}, + {1, {"2", "foo", []}} + ], + ?_assertEqual( + {TwoSibs, internal_node}, + merge_and_stem(TwoSibs, One) + ). + +should_produce_conflict_on_merge_with_unrelated_branch() -> + TwoSibs = [ + {1, {"1", "foo", []}}, + {1, {"2", "foo", []}} + ], + Three = {1, {"3", "foo", []}}, + ThreeSibs = [ + {1, {"1", "foo", []}}, + {1, {"2", "foo", []}}, + {1, {"3", "foo", []}} + ], + ?_assertEqual( + {ThreeSibs, new_branch}, + merge_and_stem(TwoSibs, Three) + ). + +should_merge_reflexive_for_child_nodes() -> + TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual( + {[TwoChild], internal_node}, + merge_and_stem([TwoChild], TwoChild) + ). + +should_merge_tree_to_itself() -> + TwoChildSibs = + {1, + {"1", "foo", [ + {"1a", "bar", []}, + {"1b", "bar", []} + ]}}, Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]), Paths = lists:map(fun leaf_to_path/1, Leafs), - FinalTree = lists:foldl(fun(Path, TreeAcc) -> - {NewTree, internal_node} = merge_and_stem(TreeAcc, Path), - NewTree - end, [TwoChildSibs], Paths), + FinalTree = lists:foldl( + fun(Path, TreeAcc) -> + {NewTree, internal_node} = merge_and_stem(TreeAcc, Path), + NewTree + end, + [TwoChildSibs], + Paths + ), ?_assertEqual([TwoChildSibs], FinalTree). leaf_to_path({Value, {Start, Keys}}) -> @@ -154,260 +176,355 @@ to_branch(Value, [Key]) -> to_branch(Value, [Key | RestKeys]) -> [{Key, [], to_branch(Value, RestKeys)}]. - -should_merge_tree_of_odd_length()-> - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}, - TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}, - {"1b", "bar", []}]}}, - ?_assertEqual({[TwoChildPlusSibs], new_leaf}, - merge_and_stem([TwoChildSibs], TwoChild)). 
- -should_merge_tree_with_stem()-> +should_merge_tree_of_odd_length() -> + TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + TwoChildSibs = + {1, + {"1", "foo", [ + {"1a", "bar", []}, + {"1b", "bar", []} + ]}}, + TwoChildPlusSibs = + {1, + {"1", "foo", [ + {"1a", "bar", [{"1aa", "bar", []}]}, + {"1b", "bar", []} + ]}}, + ?_assertEqual( + {[TwoChildPlusSibs], new_leaf}, + merge_and_stem([TwoChildSibs], TwoChild) + ). + +should_merge_tree_with_stem() -> Stemmed = {2, {"1a", "bar", []}}, - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}, - - ?_assertEqual({[TwoChildSibs], internal_node}, - merge_and_stem([TwoChildSibs], Stemmed)). - -should_merge_with_stem_at_deeper_level()-> + TwoChildSibs = + {1, + {"1", "foo", [ + {"1a", "bar", []}, + {"1b", "bar", []} + ]}}, + + ?_assertEqual( + {[TwoChildSibs], internal_node}, + merge_and_stem([TwoChildSibs], Stemmed) + ). + +should_merge_with_stem_at_deeper_level() -> Stemmed = {3, {"1bb", "boo", []}}, - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", [{"1bb", "boo", []}]}]}}, - ?_assertEqual({[TwoChildSibs], internal_node}, - merge_and_stem([TwoChildSibs], Stemmed)). - -should_merge_with_stem_at_deeper_level_with_deeper_paths()-> + TwoChildSibs = + {1, + {"1", "foo", [ + {"1a", "bar", []}, + {"1b", "bar", [{"1bb", "boo", []}]} + ]}}, + ?_assertEqual( + {[TwoChildSibs], internal_node}, + merge_and_stem([TwoChildSibs], Stemmed) + ). + +should_merge_with_stem_at_deeper_level_with_deeper_paths() -> Stemmed = {3, {"1bb", "boo", []}}, - StemmedTwoChildSibs = [{2,{"1a", "bar", []}}, - {2,{"1b", "bar", [{"1bb", "boo", []}]}}], - ?_assertEqual({StemmedTwoChildSibs, internal_node}, - merge_and_stem(StemmedTwoChildSibs, Stemmed)). - -should_merge_single_tree_with_deeper_stem()-> + StemmedTwoChildSibs = [ + {2, {"1a", "bar", []}}, + {2, {"1b", "bar", [{"1bb", "boo", []}]}} + ], + ?_assertEqual( + {StemmedTwoChildSibs, internal_node}, + merge_and_stem(StemmedTwoChildSibs, Stemmed) + ). + +should_merge_single_tree_with_deeper_stem() -> Stemmed = {3, {"1aa", "bar", []}}, - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], internal_node}, - merge_and_stem([TwoChild], Stemmed)). + TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual( + {[TwoChild], internal_node}, + merge_and_stem([TwoChild], Stemmed) + ). -should_merge_tree_with_large_stem()-> +should_merge_tree_with_large_stem() -> Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}}, - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], internal_node}, - merge_and_stem([TwoChild], Stemmed)). + TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual( + {[TwoChild], internal_node}, + merge_and_stem([TwoChild], Stemmed) + ). -should_merge_stems()-> +should_merge_stems() -> StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}}, StemmedB = {3, {"1aa", "bar", []}}, - ?_assertEqual({[StemmedA], internal_node}, - merge_and_stem([StemmedA], StemmedB)). + ?_assertEqual( + {[StemmedA], internal_node}, + merge_and_stem([StemmedA], StemmedB) + ). -should_create_conflicts_on_merge()-> - OneChild = {1, {"1","foo",[{"1a", "bar", []}]}}, +should_create_conflicts_on_merge() -> + OneChild = {1, {"1", "foo", [{"1a", "bar", []}]}}, Stemmed = {3, {"1aa", "bar", []}}, - ?_assertEqual({[OneChild, Stemmed], new_branch}, - merge_and_stem([OneChild], Stemmed)). 
+    ?_assertEqual(
+        {[OneChild, Stemmed], new_branch},
+        merge_and_stem([OneChild], Stemmed)
+    ).
-should_create_no_conflicts_on_merge()->
-    OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+should_create_no_conflicts_on_merge() ->
+    OneChild = {1, {"1", "foo", [{"1a", "bar", []}]}},
     Stemmed = {3, {"1aa", "bar", []}},
-    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
-    ?_assertEqual({[TwoChild], new_leaf},
-        merge_and_stem([OneChild, Stemmed], TwoChild)).
+    TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+    ?_assertEqual(
+        {[TwoChild], new_leaf},
+        merge_and_stem([OneChild, Stemmed], TwoChild)
+    ).
-should_ignore_conflicting_branch()->
+should_ignore_conflicting_branch() ->
     %% this test is based on couch-902-test-case2.py
     %% foo has conflicts from replication at depth two
     %% foo3 is the current value
-    Foo = {1, {"foo",
-        "val1",
-        [{"foo2","val2",[]},
+    Foo =
+        {1,
+            {"foo", "val1", [
+                {"foo2", "val2", []},
                 {"foo3", "val3", []}
-        ]}},
+            ]}},
     %% foo now has an attachment added, which leads to foo4 and val4
     %% off foo3
-    Bar = {1, {"foo",
-        [],
-        [{"foo3",
-            [],
-            [{"foo4","val4",[]}
-            ]}]}},
+    Bar = {1, {"foo", [], [{"foo3", [], [{"foo4", "val4", []}]}]}},
     %% this is what the merge returns
     %% note that it ignores the conflicting branch as there's no match
-    FooBar = {1, {"foo",
-        "val1",
-        [{"foo2","val2",[]},
-            {"foo3", "val3", [{"foo4","val4",[]}]}
-        ]}},
+    FooBar =
+        {1,
+            {"foo", "val1", [
+                {"foo2", "val2", []},
+                {"foo3", "val3", [{"foo4", "val4", []}]}
+            ]}},
     {
         "COUCHDB-902",
-        ?_assertEqual({[FooBar], new_leaf},
-            merge_and_stem([Foo], Bar))
+        ?_assertEqual(
+            {[FooBar], new_leaf},
+            merge_and_stem([Foo], Bar)
+        )
     }.
-should_not_find_missing_leaves()->
-    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
-    ?_assertEqual([],
-        couch_key_tree:find_missing(TwoChildSibs,
-            [{0,"1"}, {1,"1a"}])).
-
-should_find_missing_leaves()->
+should_not_find_missing_leaves() ->
+    TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual(
+        [],
+        couch_key_tree:find_missing(
+            TwoChildSibs,
+            [{0, "1"}, {1, "1a"}]
+        )
+    ).
+
+should_find_missing_leaves() ->
     Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
     Stemmed2 = [{2, {"1aa", "bar", []}}],
-    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
     [
         ?_assertEqual(
             [{0, "10"}, {100, "x"}],
             couch_key_tree:find_missing(
                 TwoChildSibs,
-                [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
+                [{0, "1"}, {0, "10"}, {1, "1a"}, {100, "x"}]
+            )
+        ),
         ?_assertEqual(
             [{0, "1"}, {100, "x"}],
             couch_key_tree:find_missing(
                 Stemmed1,
-                [{0,"1"}, {1,"1a"}, {100, "x"}])),
+                [{0, "1"}, {1, "1a"}, {100, "x"}]
+            )
+        ),
         ?_assertEqual(
-            [{0, "1"}, {1,"1a"}, {100, "x"}],
+            [{0, "1"}, {1, "1a"}, {100, "x"}],
             couch_key_tree:find_missing(
                 Stemmed2,
-                [{0,"1"}, {1,"1a"}, {100, "x"}]))
+                [{0, "1"}, {1, "1a"}, {100, "x"}]
+            )
+        )
     ].
-should_have_no_effect_on_removing_no_leaves()->
-    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
-    ?_assertEqual({TwoChildSibs, []},
-        couch_key_tree:remove_leafs(TwoChildSibs,
-            [])).
-
-should_have_no_effect_on_removing_non_existant_branch()->
-    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
-    ?_assertEqual({TwoChildSibs, []},
-        couch_key_tree:remove_leafs(TwoChildSibs,
-            [{0, "1"}])).
- -should_remove_leaf()-> - OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({OneChild, [{1, "1b"}]}, - couch_key_tree:remove_leafs(TwoChildSibs, - [{1, "1b"}])). - -should_produce_empty_tree_on_removing_all_leaves()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]}, - couch_key_tree:remove_leafs(TwoChildSibs, - [{1, "1b"}, {1, "1a"}])). - -should_have_no_effect_on_removing_non_existant_node()-> +should_have_no_effect_on_removing_no_leaves() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {TwoChildSibs, []}, + couch_key_tree:remove_leafs( + TwoChildSibs, + [] + ) + ). + +should_have_no_effect_on_removing_non_existant_branch() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {TwoChildSibs, []}, + couch_key_tree:remove_leafs( + TwoChildSibs, + [{0, "1"}] + ) + ). + +should_remove_leaf() -> + OneChild = [{0, {"1", "foo", [{"1a", "bar", []}]}}], + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {OneChild, [{1, "1b"}]}, + couch_key_tree:remove_leafs( + TwoChildSibs, + [{1, "1b"}] + ) + ). + +should_produce_empty_tree_on_removing_all_leaves() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[], [{1, "1b"}, {1, "1a"}]}, + couch_key_tree:remove_leafs( + TwoChildSibs, + [{1, "1b"}, {1, "1a"}] + ) + ). + +should_have_no_effect_on_removing_non_existant_node() -> Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - ?_assertEqual({Stemmed, []}, - couch_key_tree:remove_leafs(Stemmed, - [{1, "1a"}])). - -should_produce_empty_tree_on_removing_last_leaf()-> + ?_assertEqual( + {Stemmed, []}, + couch_key_tree:remove_leafs( + Stemmed, + [{1, "1a"}] + ) + ). + +should_produce_empty_tree_on_removing_last_leaf() -> Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - ?_assertEqual({[], [{2, "1aa"}]}, - couch_key_tree:remove_leafs(Stemmed, - [{2, "1aa"}])). - -should_extract_subtree()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"foo", {0, ["1"]}}],[]}, - couch_key_tree:get(TwoChildSibs, [{0, "1"}])). - -should_extract_subsubtree()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]}, - couch_key_tree:get(TwoChildSibs, [{1, "1a"}])). - -should_gather_non_existant_leaf()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[],[{0, "x"}]}, - couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])). - -should_gather_leaf()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]}, - couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])). - -shoul_gather_multiple_leaves()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]}, - couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])). + ?_assertEqual( + {[], [{2, "1aa"}]}, + couch_key_tree:remove_leafs( + Stemmed, + [{2, "1aa"}] + ) + ). + +should_extract_subtree() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[{"foo", {0, ["1"]}}], []}, + couch_key_tree:get(TwoChildSibs, [{0, "1"}]) + ). 
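A hedged sketch of the `{Found, Missing}` contract this getter follows; the missing-rev handling here is inferred by analogy with the get_key_leafs/2 cases below, not spelled out in this hunk:

```
1> Tree = [{0, {"1", "foo", [{"1a", "bar", []}]}}].
2> couch_key_tree:get(Tree, [{1, "1a"}, {5, "nope"}]).
%% assumed result shape: {FoundValuesWithPaths, MissingRevs}
{[{"bar", {1, ["1a", "1"]}}], [{5, "nope"}]}
```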
+ +should_extract_subsubtree() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[{"bar", {1, ["1a", "1"]}}], []}, + couch_key_tree:get(TwoChildSibs, [{1, "1a"}]) + ). + +should_gather_non_existant_leaf() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[], [{0, "x"}]}, + couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}]) + ). + +should_gather_leaf() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[{"bar", {1, ["1a", "1"]}}], []}, + couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}]) + ). + +shoul_gather_multiple_leaves() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], []}, + couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}]) + ). should_gather_single_leaf_for_multiple_revs() -> - OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}], + OneChild = [{0, {"1", "foo", [{"1a", "bar", []}]}}], ToFind = [{0, "1"}, {1, "1a"}], - ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]}, - couch_key_tree:get_key_leafs(OneChild, ToFind)). + ?_assertEqual( + {[{"bar", {1, ["1a", "1"]}}], []}, + couch_key_tree:get_key_leafs(OneChild, ToFind) + ). should_gather_multiple_for_multiple_revs() -> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], ToFind = [{0, "1"}, {1, "1a"}], - ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]}, - couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)). - -should_retrieve_full_key_path()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{0,[{"1", "foo"}]}],[]}, - couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])). - -should_retrieve_full_key_path_for_node()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]}, - couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])). - -should_retrieve_leaves_with_parent_node()-> + ?_assertEqual( + {[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], []}, + couch_key_tree:get_key_leafs(TwoChildSibs, ToFind) + ). + +should_retrieve_full_key_path() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[{0, [{"1", "foo"}]}], []}, + couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}]) + ). + +should_retrieve_full_key_path_for_node() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual( + {[{1, [{"1a", "bar"}, {"1", "foo"}]}], []}, + couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}]) + ). 
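The ordering these path assertions rely on, isolated in a shell sketch: full key paths come back leaf-first and end at the root key:

```
1> Tree = [{0, {"1", "foo", [{"1a", "bar", []}]}}].
2> couch_key_tree:get_full_key_paths(Tree, [{1, "1a"}]).
{[{1, [{"1a", "bar"}, {"1", "foo"}]}], []}
```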
+ +should_retrieve_leaves_with_parent_node() -> Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], [ - ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}], - couch_key_tree:get_all_leafs_full(Stemmed)), - ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]}, - {1, [{"1b", "bar"},{"1", "foo"}]}], - couch_key_tree:get_all_leafs_full(TwoChildSibs)) + ?_assertEqual( + [{2, [{"1aa", "bar"}, {"1a", "bar"}]}], + couch_key_tree:get_all_leafs_full(Stemmed) + ), + ?_assertEqual( + [ + {1, [{"1a", "bar"}, {"1", "foo"}]}, + {1, [{"1b", "bar"}, {"1", "foo"}]} + ], + couch_key_tree:get_all_leafs_full(TwoChildSibs) + ) ]. -should_retrieve_all_leaves()-> +should_retrieve_all_leaves() -> Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], [ - ?_assertEqual([{"bar", {2, ["1aa","1a"]}}], - couch_key_tree:get_all_leafs(Stemmed)), - ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}], - couch_key_tree:get_all_leafs(TwoChildSibs)) + ?_assertEqual( + [{"bar", {2, ["1aa", "1a"]}}], + couch_key_tree:get_all_leafs(Stemmed) + ), + ?_assertEqual( + [{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], + couch_key_tree:get_all_leafs(TwoChildSibs) + ) ]. -should_have_no_leaves_for_empty_tree()-> +should_have_no_leaves_for_empty_tree() -> ?_assertEqual(0, couch_key_tree:count_leafs([])). -should_have_single_leaf_for_tree_with_single_node()-> - ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])). +should_have_single_leaf_for_tree_with_single_node() -> + ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1", "foo", []}}])). -should_have_two_leaves_for_tree_with_chindler_siblings()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], +should_have_two_leaves_for_tree_with_chindler_siblings() -> + TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)). -should_not_affect_on_leaf_counting_for_stemmed_tree()-> +should_not_affect_on_leaf_counting_for_stemmed_tree() -> ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])). -should_have_no_effect_for_stemming_more_levels_than_exists()-> - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], +should_have_no_effect_for_stemming_more_levels_than_exists() -> + TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)). -should_return_one_deepest_node()-> - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], +should_return_one_deepest_node() -> + TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], Stemmed = [{2, {"1aa", "bar", []}}], ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)). -should_return_two_deepest_nodes()-> - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], +should_return_two_deepest_nodes() -> + TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)). - merge_and_stem(RevTree, Tree) -> {Merged, Result} = couch_key_tree:merge(RevTree, Tree), {couch_key_tree:stem(Merged, ?DEPTH), Result}. 
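A compact summary of the three merge outcomes asserted throughout this module, using the module's own merge_and_stem/2 helper defined just above; a sketch mirroring the empty-tree, reflexive, and conflict cases:

```
One = {1, {"1", "foo", []}},
{T1, new_leaf} = merge_and_stem([], One),        %% extends the (empty) tree
{T1, internal_node} = merge_and_stem(T1, One),   %% path already contained
{_, new_branch} = merge_and_stem(T1, {1, {"2", "foo", []}}).  %% conflict
```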
diff --git a/src/couch/test/eunit/couch_passwords_tests.erl b/src/couch/test/eunit/couch_passwords_tests.erl index 88de8530f..6b67a99e3 100644 --- a/src/couch/test/eunit/couch_passwords_tests.erl +++ b/src/couch/test/eunit/couch_passwords_tests.erl @@ -14,41 +14,52 @@
 -include_lib("couch/include/couch_eunit.hrl").
+pbkdf2_test_() ->
+    {"PBKDF2", [
+        {"Iterations: 1, length: 20",
+            ?_assertEqual(
+                {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
+                couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20)
+            )},
-pbkdf2_test_()->
-    {"PBKDF2",
-    [
-        {"Iterations: 1, length: 20",
-            ?_assertEqual(
-                {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
-                couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
+        {"Iterations: 2, length: 20",
+            ?_assertEqual(
+                {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
+                couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20)
+            )},
-        {"Iterations: 2, length: 20",
-            ?_assertEqual(
-                {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
-                couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
+        {"Iterations: 4096, length: 20",
+            ?_assertEqual(
+                {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
+                couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20)
+            )},
-        {"Iterations: 4096, length: 20",
-            ?_assertEqual(
-                {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
-                couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
+        {"Iterations: 4096, length: 25",
+            ?_assertEqual(
+                {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
+                couch_passwords:pbkdf2(
+                    <<"passwordPASSWORDpassword">>,
+                    <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
+                    4096,
+                    25
+                )
+            )},
+        {"Null byte",
+            ?_assertEqual(
+                {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
+                couch_passwords:pbkdf2(
+                    <<"pass\0word">>,
+                    <<"sa\0lt">>,
+                    4096,
+                    16
+                )
+            )},
-        {"Iterations: 4096, length: 25",
-            ?_assertEqual(
-                {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
-                couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
-                    <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
-                    4096, 25))},
-        {"Null byte",
-            ?_assertEqual(
-                {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
-                couch_passwords:pbkdf2(<<"pass\0word">>,
-                    <<"sa\0lt">>,
-                    4096, 16))},
-
-        {timeout, 600, %% this may run too long on slow hosts
-            {"Iterations: 16777216 - this may take some time",
-                ?_assertEqual(
-                    {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
-                    couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
-                )}}]}.
+        %% this may run too long on slow hosts
+        {timeout, 600,
+            {"Iterations: 16777216 - this may take some time",
+                ?_assertEqual(
+                    {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
+                    couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
+                )}}
+    ]}.
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl index 440fc8e1b..01631ba28 100644 --- a/src/couch/test/eunit/couch_query_servers_tests.erl +++ b/src/couch/test/eunit/couch_query_servers_tests.erl @@ -15,24 +15,19 @@
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_eunit.hrl").
-
 setup() ->
     meck:new([config, couch_log]).
-
 teardown(_) ->
     meck:unload().
-
 setup_oom() ->
     test_util:start_couch([ioq]).
-
 teardown_oom(Ctx) ->
     meck:unload(),
     test_util:stop_couch(Ctx).
-
 sum_overflow_test_() ->
 {
 "Test overflow detection in the _sum reduce function",
@@ -48,26 +43,27 @@ sum_overflow_test_() ->
 }
 }.
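The PBKDF2 digests in the block above are the RFC 6070 PBKDF2-HMAC-SHA1 test vectors. A minimal cross-check, assuming OTP 24.2 or newer for crypto:pbkdf2_hmac/5 (couch_passwords:pbkdf2/4 returns the derived key hex-encoded, so the raw bytes are hex-encoded here too):

```
Dk = crypto:pbkdf2_hmac(sha, <<"password">>, <<"salt">>, 1, 20),
%% lowercase hex, matching the expected binaries in the tests above
Hex = iolist_to_binary([io_lib:format("~2.16.0b", [B]) || <<B>> <= Dk]),
<<"0c60c80f961f0e71f3a9b524af6012062fe037a6">> = Hex.
```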
- filter_oom_test_() -> -{ - "Test recovery from oom in filters", { - setup, - fun setup_oom/0, - fun teardown_oom/1, - [ - fun should_split_large_batches/0 - ] - } -}. + "Test recovery from oom in filters", + { + setup, + fun setup_oom/0, + fun teardown_oom/1, + [ + fun should_split_large_batches/0 + ] + } + }. should_return_error_on_overflow() -> meck:reset([config, couch_log]), meck:expect( - config, get, ["query_server_config", "reduce_limit", "true"], - "true" - ), + config, + get, + ["query_server_config", "reduce_limit", "true"], + "true" + ), meck:expect(couch_log, error, ['_', '_'], ok), KVs = gen_sum_kvs(), {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), @@ -75,13 +71,14 @@ should_return_error_on_overflow() -> ?assert(meck:called(config, get, '_')), ?assert(meck:called(couch_log, error, '_')). - should_return_object_on_log() -> meck:reset([config, couch_log]), meck:expect( - config, get, ["query_server_config", "reduce_limit", "true"], - "log" - ), + config, + get, + ["query_server_config", "reduce_limit", "true"], + "log" + ), meck:expect(couch_log, error, ['_', '_'], ok), KVs = gen_sum_kvs(), {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), @@ -91,13 +88,14 @@ should_return_object_on_log() -> ?assert(meck:called(config, get, '_')), ?assert(meck:called(couch_log, error, '_')). - should_return_object_on_false() -> meck:reset([config, couch_log]), meck:expect( - config, get, ["query_server_config", "reduce_limit", "true"], - "false" - ), + config, + get, + ["query_server_config", "reduce_limit", "true"], + "false" + ), meck:expect(couch_log, error, ['_', '_'], ok), KVs = gen_sum_kvs(), {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), @@ -107,18 +105,19 @@ should_return_object_on_false() -> ?assert(meck:called(config, get, '_')), ?assertNot(meck:called(couch_log, error, '_')). - should_split_large_batches() -> Req = {json_req, {[]}}, Db = undefined, DDoc = #doc{ id = <<"_design/foo">>, revs = {0, [<<"bork bork bork">>]}, - body = {[ - {<<"filters">>, {[ - {<<"bar">>, <<"function(req, doc) {return true;}">>} - ]}} - ]} + body = + {[ + {<<"filters">>, + {[ + {<<"bar">>, <<"function(req, doc) {return true;}">>} + ]}} + ]} }, FName = <<"bar">>, Docs = [ @@ -139,12 +138,17 @@ should_split_large_batches() -> {ok, Ret} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs), ?assertEqual([split_batch, split_batch], Ret). - gen_sum_kvs() -> - lists:map(fun(I) -> - Props = lists:map(fun(_) -> - K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)), - {K, 1} - end, lists:seq(1, 20)), - [I, {Props}] - end, lists:seq(1, 10)). + lists:map( + fun(I) -> + Props = lists:map( + fun(_) -> + K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)), + {K, 1} + end, + lists:seq(1, 20) + ), + [I, {Props}] + end, + lists:seq(1, 10) + ). diff --git a/src/couch/test/eunit/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl index 66533d48c..a43106d89 100644 --- a/src/couch/test/eunit/couch_server_tests.erl +++ b/src/couch/test/eunit/couch_server_tests.erl @@ -45,13 +45,13 @@ teardown(rename, Db) -> teardown(_, Db) -> teardown(Db). 
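Stepping back to the reduce-limit cases above: they all follow one meck pattern, pinning a config read to a fixed value and then asserting the mock was consulted. A self-contained sketch using the same module, key, and return values as those tests:

```
meck:new([config, couch_log]),
meck:expect(config, get, ["query_server_config", "reduce_limit", "true"], "log"),
meck:expect(couch_log, error, ['_', '_'], ok),
%% ... run the reduce under test ...
true = meck:called(config, get, '_'),
meck:unload().
```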
- delete_db_test_() -> { "Test for proper deletion of db file", { setup, - fun start/0, fun test_util:stop/1, + fun start/0, + fun test_util:stop/1, [ make_test_case(rename, [fun should_rename_on_delete/2]), make_test_case(delete, [fun should_delete/2]) @@ -76,7 +76,8 @@ should_rename_on_delete(_, Db) -> ?assertMatch([_], DeletedFiles), [Renamed] = DeletedFiles, ?assertEqual( - filename:extension(Origin), filename:extension(Renamed)), + filename:extension(Origin), filename:extension(Renamed) + ), ?assert(filelib:is_regular(Renamed)) end). @@ -93,7 +94,6 @@ should_delete(_, Db) -> deleted_files(ViewFile) -> filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*"). - bad_engine_option_test_() -> { setup, @@ -104,19 +104,19 @@ bad_engine_option_test_() -> ] }. - t_bad_engine_option() -> Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]), ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}). - get_engine_path_test_() -> { setup, - fun start/0, fun test_util:stop/1, + fun start/0, + fun test_util:stop/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_return_engine_path/1, fun should_return_invalid_engine_error/1 @@ -124,7 +124,6 @@ get_engine_path_test_() -> } }. - should_return_engine_path(Db) -> DbName = couch_db:name(Db), Engine = couch_db_engine:get_engine(Db), @@ -132,14 +131,12 @@ should_return_engine_path(Db) -> FilePath = couch_db:get_filepath(Db), ?_assertMatch({ok, FilePath}, Resp). - should_return_invalid_engine_error(Db) -> DbName = couch_db:name(Db), Engine = fake_engine, Resp = couch_server:get_engine_path(DbName, Engine), ?_assertMatch({error, {invalid_engine, Engine}}, Resp). - interleaved_requests_test_() -> { setup, @@ -148,7 +145,6 @@ interleaved_requests_test_() -> fun make_interleaved_requests/1 }. - start_interleaved() -> TestDbName = ?tempdb(), meck:new(couch_db, [passthrough]), @@ -180,19 +176,16 @@ start_interleaved() -> end), {test_util:start_couch(), TestDbName}. - stop_interleaved({Ctx, TestDbName}) -> couch_server:delete(TestDbName, [?ADMIN_CTX]), meck:unload(), test_util:stop_couch(Ctx). - make_interleaved_requests({_, TestDbName}) -> [ fun() -> t_interleaved_create_delete_open(TestDbName) end ]. - t_interleaved_create_delete_open(DbName) -> {CrtRef, OpenRef} = {make_ref(), make_ref()}, CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}}, @@ -247,7 +240,6 @@ t_interleaved_create_delete_open(DbName) -> ?assert(is_process_alive(CouchServer)), check_monitor_not_triggered(CSRef). - get_opener_pid(DbName) -> WaitFun = fun() -> case ets:lookup(couch_server:couch_dbs(DbName), DbName) of @@ -259,23 +251,28 @@ get_opener_pid(DbName) -> end, test_util:wait(WaitFun). - wait_for_open_async_result(CouchServer, Opener) -> WaitFun = fun() -> {_, Messages} = erlang:process_info(CouchServer, messages), - Found = lists:foldl(fun(Msg, Acc) -> - case Msg of - {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} -> - true; - _ -> - Acc - end - end, false, Messages), - if Found -> ok; true -> wait end + Found = lists:foldl( + fun(Msg, Acc) -> + case Msg of + {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} -> + true; + _ -> + Acc + end + end, + false, + Messages + ), + if + Found -> ok; + true -> wait + end end, test_util:wait(WaitFun). - check_monitor_not_triggered(Ref) -> receive {'DOWN', Ref, _, _, Reason0} -> @@ -284,7 +281,6 @@ check_monitor_not_triggered(Ref) -> ok end. 
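The mailbox scan in wait_for_open_async_result/2 above reduces to a small reusable idea: erlang:process_info(Pid, messages) returns a snapshot of a process's queued messages, which a test can pattern-match for in-flight gen_server calls. A sketch:

```
has_open_result(Server) ->
    %% snapshot of everything queued in Server's mailbox
    {messages, Queued} = erlang:process_info(Server, messages),
    lists:any(
        fun
            ({'$gen_call', _From, {open_result, _, {ok, _}}}) -> true;
            (_) -> false
        end,
        Queued
    ).
```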
- get_next_message() -> receive Msg -> diff --git a/src/couch/test/eunit/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl index a7fedf0af..4146a9139 100644 --- a/src/couch/test/eunit/couch_stream_tests.erl +++ b/src/couch/test/eunit/couch_stream_tests.erl @@ -24,16 +24,17 @@ setup() -> teardown({Fd, _}) -> ok = couch_file:close(Fd). - stream_test_() -> { "CouchDB stream tests", { setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + fun() -> test_util:start(?MODULE, [ioq]) end, + fun test_util:stop/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_write/1, fun should_write_consecutive/1, @@ -49,7 +50,6 @@ stream_test_() -> } }. - should_write({_, Stream}) -> ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)). @@ -98,7 +98,10 @@ should_stream_more_with_4K_chunk_size({Fd, _}) -> Data = <<"a1b2c">>, couch_stream:write(Stream, Data), [Data | Acc] - end, [], lists:seq(1, 1024)), + end, + [], + lists:seq(1, 1024) + ), {NewEngine, Length, _, _, _} = couch_stream:close(Stream), {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}). @@ -109,16 +112,17 @@ should_stop_on_normal_exit_of_stream_opener({Fd, _}) -> fun() -> {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)), RunnerPid ! {pid, StreamPid} - end), - StreamPid = receive - {pid, StreamPid0} -> StreamPid0 - end, + end + ), + StreamPid = + receive + {pid, StreamPid0} -> StreamPid0 + end, % Confirm the validity of the test by verifying the stream opener has died ?assertNot(is_process_alive(OpenerPid)), % Verify the stream itself has also died ?_assertNot(is_process_alive(StreamPid)). - read_all(Engine) -> Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []), iolist_to_binary(Data). diff --git a/src/couch/test/eunit/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl index 0ec03563b..f888dd596 100644 --- a/src/couch/test/eunit/couch_task_status_tests.erl +++ b/src/couch/test/eunit/couch_task_status_tests.erl @@ -17,27 +17,30 @@ -define(TIMEOUT, 1000). - setup() -> Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]), {ok, TaskStatusPid} = couch_task_status:start_link(), TaskUpdaterPid = spawn(fun() -> loop() end), {TaskStatusPid, TaskUpdaterPid, Ctx}. - -teardown({TaskStatusPid, _, Ctx})-> - test_util:stop_sync_throw(TaskStatusPid, fun() -> - couch_task_status:stop() - end, timeout_error, ?TIMEOUT), +teardown({TaskStatusPid, _, Ctx}) -> + test_util:stop_sync_throw( + TaskStatusPid, + fun() -> + couch_task_status:stop() + end, + timeout_error, + ?TIMEOUT + ), test_util:stop(Ctx). - couch_task_status_test_() -> { "CouchDB task status updates", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_register_task/1, fun should_set_task_startup_time/1, @@ -51,12 +54,10 @@ couch_task_status_test_() -> fun should_reset_control_update_frequency/1, fun should_track_multiple_tasks/1, fun should_finish_task/1 - ] } }. - should_register_task({_, Pid, _Ctx}) -> ok = call(Pid, add, [{type, replication}, {progress, 0}]), ?_assertEqual(1, length(couch_task_status:all())). @@ -76,8 +77,10 @@ should_set_task_type({_, Pid, _Ctx}) -> should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) -> ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?_assertEqual({add_task_error, already_registered}, - call(Pid, add, [{type, compaction}, {progress, 0}])). 
+ ?_assertEqual( + {add_task_error, already_registered}, + call(Pid, add, [{type, compaction}, {progress, 0}]) + ). should_set_task_progress({_, Pid, _Ctx}) -> ok = call(Pid, add, [{type, replication}, {progress, 0}]), @@ -92,10 +95,12 @@ should_update_time_changes_on_task_progress({_, Pid, _Ctx}) -> ?_assert( begin ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ok = timer:sleep(1000), % sleep awhile to customize update time + % sleep awhile to customize update time + ok = timer:sleep(1000), call(Pid, update, [{progress, 25}]), get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on) - end). + end + ). %%should_control_update_frequency({_, Pid, _Ctx}) -> %% ?_assertEqual(66, @@ -109,7 +114,8 @@ should_update_time_changes_on_task_progress({_, Pid, _Ctx}) -> %% end). should_reset_control_update_frequency({_, Pid, _Ctx}) -> - ?_assertEqual(87, + ?_assertEqual( + 87, begin ok = call(Pid, add, [{type, replication}, {progress, 0}]), call(Pid, update, [{progress, 50}]), @@ -119,7 +125,8 @@ should_reset_control_update_frequency({_, Pid, _Ctx}) -> call(Pid, update_frequency, 0), call(Pid, update, [{progress, 87}]), get_task_prop(Pid, progress) - end). + end + ). should_track_multiple_tasks(_) -> ?_assert(run_multiple_tasks()). @@ -130,7 +137,6 @@ should_finish_task({_, Pid, _Ctx}) -> ok = call(Pid, done), ?_assertEqual(0, length(couch_task_status:all())). - run_multiple_tasks() -> Pid1 = spawn(fun() -> loop() end), Pid2 = spawn(fun() -> loop() end), @@ -161,7 +167,6 @@ run_multiple_tasks() -> true. - loop() -> receive {add, Props, From} -> @@ -188,7 +193,7 @@ call(Pid, done) -> {'DOWN', Ref, _Type, Pid, _Info} -> Res after ?TIMEOUT -> - throw(timeout_error) + throw(timeout_error) end; call(Pid, Command) -> Pid ! {Command, self()}, @@ -217,17 +222,22 @@ get_task_prop(Pid, Prop) -> Acc end end, - [], couch_task_status:all() + [], + couch_task_status:all() ), case couch_util:get_value(Prop, hd(Element), nil) of nil -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Could not get property '" - ++ couch_util:to_list(Prop) - ++ "' for task " - ++ pid_to_list(Pid)}]}); + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, + {line, ?LINE}, + {reason, + "Could not get property '" ++ + couch_util:to_list(Prop) ++ + "' for task " ++ + pid_to_list(Pid)} + ]} + ); Value -> Value end. diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl index 44a5cce0a..c07ddc093 100644 --- a/src/couch/test/eunit/couch_util_tests.erl +++ b/src/couch/test/eunit/couch_util_tests.erl @@ -14,7 +14,6 @@ -include_lib("couch/include/couch_eunit.hrl"). - validate_callback_exists_test_() -> { "validate_callback_exists tests", @@ -33,8 +32,10 @@ implode_test() -> ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")). trim_test() -> - lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end, - [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]). + lists:map( + fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end, + [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"] + ). 
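trim_test/0 above uses the table-driven style this module favors; the generator variant of the same pattern, building labelled ?_assertEqual tests from a case list, looks like this sketch (the case data is hypothetical):

```
trim_cases_test_() ->
    Cases = [{"foo", " foo "}, {"bar", "bar\n"}],
    [
        %% each case becomes a labelled eunit test object
        {Input, ?_assertEqual(Expected, couch_util:trim(Input))}
     || {Expected, Input} <- Cases
    ].
```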
abs_pathname_test() ->
     {ok, Cwd} = file:get_cwd(),
@@ -44,8 +45,10 @@ flush_test() ->
     ?assertNot(couch_util:should_flush()),
     AcquireMem = fun() ->
         _IntsToAGazillion = lists:seq(1, 200000),
-        _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
-            lists:seq(1, 500000)),
+        _LotsOfData = lists:map(
+            fun(_) -> <<"foobar">> end,
+            lists:seq(1, 500000)
+        ),
         _ = list_to_binary(_LotsOfData),
         %% Allocation 200K tuples puts us above the memory threshold
@@ -97,11 +100,20 @@ find_in_binary_test_() ->
     ],
     lists:map(
         fun({Needle, Haystack, Result}) ->
-            Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
-                [Needle, Haystack])),
-            {Msg, ?_assertMatch(Result,
-                couch_util:find_in_binary(Needle, Haystack))}
-        end, Cases).
+            Msg = lists:flatten(
+                io_lib:format(
+                    "Looking for ~s in ~s",
+                    [Needle, Haystack]
+                )
+            ),
+            {Msg,
+                ?_assertMatch(
+                    Result,
+                    couch_util:find_in_binary(Needle, Haystack)
+                )}
+        end,
+        Cases
+    ).
 should_succeed_for_existent_cb() ->
     ?_assert(couch_util:validate_callback_exists(lists, any, 2)).
@@ -115,10 +127,14 @@ should_fail_for_missing_cb() ->
     lists:map(
         fun({M, F, A} = MFA) ->
             Name = lists:flatten(io_lib:format("~w:~w/~w", [M, F, A])),
-            {Name, ?_assertThrow(
-                {error, {undefined_callback, Name, MFA}},
-                couch_util:validate_callback_exists(M, F, A))}
-        end, Cases).
+            {Name,
+                ?_assertThrow(
+                    {error, {undefined_callback, Name, MFA}},
+                    couch_util:validate_callback_exists(M, F, A)
+                )}
+        end,
+        Cases
+    ).
 to_hex_test_() ->
     [
diff --git a/src/couch/test/eunit/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl index 9ca2c8a84..6546779bb 100644 --- a/src/couch/test/eunit/couch_uuids_tests.erl +++ b/src/couch/test/eunit/couch_uuids_tests.erl @@ -16,17 +16,14 @@
 -define(TIMEOUT, 20).
-
 setup_all() ->
     test_util:start_applications([config]),
     couch_uuids:start().
-
 teardown_all(_) ->
     couch_uuids:stop(),
     test_util:stop_applications([config]).
-
 uuids_test_() ->
     {
         setup,
@@ -40,25 +37,21 @@ uuids_test_() ->
         ]
     }.
-
 default_algorithm() ->
     config:delete("uuids", "algorithm", false),
     check_unique().
-
 sequential_algorithm() ->
     config:set("uuids", "algorithm", "sequential", false),
     check_unique(),
     check_increment_monotonically(),
     check_rollover().
-
 utc_algorithm() ->
     config:set("uuids", "algorithm", "utc_random", false),
     check_unique(),
     check_increment_monotonically().
-
 utc_id_suffix_algorithm() ->
     config:set("uuids", "algorithm", "utc_id", false),
     config:set("uuids", "utc_id_suffix", "bozo", false),
@@ -66,36 +59,30 @@ utc_id_suffix_algorithm() ->
     check_increment_monotonically(),
     check_preserve_suffix().
-
 check_unique() ->
     %% this one may really run for too long on slow hosts
     ?assert(test_unique(10000, [couch_uuids:new()])).
-
 check_increment_monotonically() ->
     ?assert(couch_uuids:new() < couch_uuids:new()).
-
 check_rollover() ->
     UUID = binary_to_list(couch_uuids:new()),
     Prefix = element(1, lists:split(26, UUID)),
     N = gen_until_pref_change(Prefix, 0),
     ?assert(N >= 5000 andalso N =< 11000).
-
 check_preserve_suffix() ->
     UUID = binary_to_list(couch_uuids:new()),
     Suffix = get_suffix(UUID),
     ?assert(test_same_suffix(10000, Suffix)).
-
 test_unique(0, _) ->
     true;
 test_unique(N, UUIDs) ->
     UUID = couch_uuids:new(),
     ?assertNot(lists:member(UUID, UUIDs)),
-    test_unique(N - 1, [UUID| UUIDs]).
-
+    test_unique(N - 1, [UUID | UUIDs]).
 gen_until_pref_change(_, Count) when Count > 8251 ->
     Count;
@@ -105,7 +92,6 @@ gen_until_pref_change(Prefix, N) ->
         _ -> N
     end.
- test_same_suffix(0, _) -> true; test_same_suffix(N, Suffix) -> @@ -114,11 +100,9 @@ test_same_suffix(N, Suffix) -> _ -> false end. - get_prefix(UUID) -> element(1, lists:split(26, binary_to_list(UUID))). - get_suffix(UUID) when is_binary(UUID) -> get_suffix(binary_to_list(UUID)); get_suffix(UUID) -> diff --git a/src/couch/test/eunit/couch_work_queue_tests.erl b/src/couch/test/eunit/couch_work_queue_tests.erl index a192230ef..acf0e45dc 100644 --- a/src/couch/test/eunit/couch_work_queue_tests.erl +++ b/src/couch/test/eunit/couch_work_queue_tests.erl @@ -16,7 +16,6 @@ -define(TIMEOUT, 100). - setup(Opts) -> {ok, Q} = couch_work_queue:new(Opts), Producer = spawn_producer(Q), @@ -33,9 +32,11 @@ setup_max_items_and_size() -> setup([{max_size, 160}, {max_items, 3}]). setup_multi_workers() -> - {Q, Producer, Consumer1} = setup([{max_size, 160}, - {max_items, 3}, - {multi_workers, true}]), + {Q, Producer, Consumer1} = setup([ + {max_size, 160}, + {max_items, 3}, + {multi_workers, true} + ]), Consumer2 = spawn_consumer(Q), Consumer3 = spawn_consumer(Q), {Q, Producer, [Consumer1, Consumer2, Consumer3]}. @@ -52,7 +53,6 @@ teardown({Q, Producer, Consumers}) when is_list(Consumers) -> teardown({Q, Producer, Consumer}) -> teardown({Q, Producer, [Consumer]}). - single_consumer_test_() -> { "Single producer and consumer", @@ -61,7 +61,8 @@ single_consumer_test_() -> "Queue with 3 max items", { foreach, - fun setup_max_items/0, fun teardown/1, + fun setup_max_items/0, + fun teardown/1, single_consumer_max_item_count() ++ common_cases() } }, @@ -69,7 +70,8 @@ single_consumer_test_() -> "Queue with max size of 160 bytes", { foreach, - fun setup_max_size/0, fun teardown/1, + fun setup_max_size/0, + fun teardown/1, single_consumer_max_size() ++ common_cases() } }, @@ -77,7 +79,8 @@ single_consumer_test_() -> "Queue with max size of 160 bytes and 3 max items", { foreach, - fun setup_max_items_and_size/0, fun teardown/1, + fun setup_max_items_and_size/0, + fun teardown/1, single_consumer_max_items_and_size() ++ common_cases() } } @@ -92,15 +95,15 @@ multiple_consumers_test_() -> "Queue with max size of 160 bytes and 3 max items", { foreach, - fun setup_multi_workers/0, fun teardown/1, + fun setup_multi_workers/0, + fun teardown/1, common_cases() ++ multiple_consumers() } - } ] }. -common_cases()-> +common_cases() -> [ fun should_block_consumer_on_dequeue_from_empty_queue/1, fun should_consume_right_item/1, @@ -109,7 +112,7 @@ common_cases()-> fun should_be_closed/1 ]. -single_consumer_max_item_count()-> +single_consumer_max_item_count() -> [ fun should_have_no_items_for_new_queue/1, fun should_block_producer_on_full_queue_count/1, @@ -118,7 +121,7 @@ single_consumer_max_item_count()-> fun should_consume_all/1 ]. -single_consumer_max_size()-> +single_consumer_max_size() -> [ fun should_have_zero_size_for_new_queue/1, fun should_block_producer_on_full_queue_size/1, @@ -138,7 +141,6 @@ multiple_consumers() -> fun should_increase_queue_size_on_produce/1 ]. - should_have_no_items_for_new_queue({Q, _, _}) -> ?_assertEqual(0, couch_work_queue:item_count(Q)). 
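A hedged sketch of the queue contract these fixtures configure: producers block once max_items or max_size is reached, and consumers block on an empty queue until items or a close arrive. The function names are the ones used in this file; the dequeue return shape is an assumption drawn from the test helpers:

```
%% a sketch, not this suite's code; dequeue/1's return is assumed
{ok, Q} = couch_work_queue:new([{max_items, 3}, {max_size, 160}]),
ok = couch_work_queue:queue(Q, <<"item">>),
1 = couch_work_queue:item_count(Q),
{ok, [<<"item">>]} = couch_work_queue:dequeue(Q),
ok = couch_work_queue:close(Q).
```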
@@ -172,8 +174,10 @@ should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) -> ?assertEqual(0, couch_work_queue:item_count(Q)), ?assertEqual(0, couch_work_queue:size(Q)), - R = [{ping(C), Item} - || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])], + R = [ + {ping(C), Item} + || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3]) + ], ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R); should_consume_right_item({Q, Producer, Consumer}) -> @@ -284,8 +288,10 @@ should_be_closed({Q, _, Consumers}) when is_list(Consumers) -> ItemsCount = couch_work_queue:item_count(Q), Size = couch_work_queue:size(Q), - ?_assertEqual({[closed, closed, closed], closed, closed}, - {LastConsumerItems, ItemsCount, Size}); + ?_assertEqual( + {[closed, closed, closed], closed, closed}, + {LastConsumerItems, ItemsCount, Size} + ); should_be_closed({Q, _, Consumer}) -> ok = close_queue(Q), @@ -295,14 +301,19 @@ should_be_closed({Q, _, Consumer}) -> ItemsCount = couch_work_queue:item_count(Q), Size = couch_work_queue:size(Q), - ?_assertEqual({closed, closed, closed}, - {LastConsumerItems, ItemsCount, Size}). - + ?_assertEqual( + {closed, closed, closed}, + {LastConsumerItems, ItemsCount, Size} + ). close_queue(Q) -> - test_util:stop_sync(Q, fun() -> - ok = couch_work_queue:close(Q) - end, ?TIMEOUT). + test_util:stop_sync( + Q, + fun() -> + ok = couch_work_queue:close(Q) + end, + ?TIMEOUT + ). spawn_consumer(Q) -> Parent = self(), @@ -365,10 +376,13 @@ produce(Q, Producer, Size, Wait) -> {item, Ref, Item} -> Item after ?TIMEOUT -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout asking producer to produce an item"}]}) + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, + {line, ?LINE}, + {reason, "Timeout asking producer to produce an item"} + ]} + ) end. ping(Pid) -> @@ -393,10 +407,10 @@ stop(Pid, Name) -> wait_increment(Q, ItemsCount) -> test_util:wait(fun() -> - case couch_work_queue:item_count(Q) > ItemsCount of - true -> - ok; - false -> - wait - end + case couch_work_queue:item_count(Q) > ItemsCount of + true -> + ok; + false -> + wait + end end). diff --git a/src/couch/test/eunit/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl index 04859dbc9..376553985 100644 --- a/src/couch/test/eunit/couchdb_attachments_tests.erl +++ b/src/couch/test/eunit/couchdb_attachments_tests.erl @@ -26,12 +26,15 @@ -define(TIMEWAIT, 1000). -define(i2l(I), integer_to_list(I)). - start() -> Ctx = test_util:start_couch(), % ensure in default compression settings for attachments_compression_tests - config:set("attachments", "compression_level", - ?i2l(?COMPRESSION_LEVEL), false), + config:set( + "attachments", + "compression_level", + ?i2l(?COMPRESSION_LEVEL), + false + ), config:set("attachments", "compressible_types", "text/*", false), Ctx. @@ -46,7 +49,7 @@ setup() -> setup({binary, standalone}) -> {Host, DbName} = setup(), - setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG); + setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG); setup({text, standalone}) -> {Host, DbName} = setup(), setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT); @@ -76,13 +79,13 @@ teardown(DbName) -> ok = couch_server:delete(?l2b(DbName), []), ok. 
- attachments_test_() -> { "Attachments tests", { setup, - fun start/0, fun test_util:stop_couch/1, + fun start/0, + fun test_util:stop_couch/1, [ attachments_md5_tests(), attachments_compression_tests() @@ -95,7 +98,8 @@ attachments_md5_tests() -> "Attachments MD5 tests", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_upload_attachment_without_md5/1, fun should_upload_attachment_by_chunks_without_md5/1, @@ -111,12 +115,12 @@ attachments_md5_tests() -> attachments_compression_tests() -> Funs = [ - fun should_get_att_without_accept_gzip_encoding/2, - fun should_get_att_with_accept_gzip_encoding/2, - fun should_get_att_with_accept_deflate_encoding/2, - fun should_return_406_response_on_unsupported_encoding/2, - fun should_get_doc_with_att_data/2, - fun should_get_doc_with_att_data_stub/2 + fun should_get_att_without_accept_gzip_encoding/2, + fun should_get_att_with_accept_gzip_encoding/2, + fun should_get_att_with_accept_deflate_encoding/2, + fun should_return_406_response_on_unsupported_encoding/2, + fun should_get_doc_with_att_data/2, + fun should_get_doc_with_att_data_stub/2 ], { "Attachments compression tests", @@ -133,13 +137,15 @@ attachments_compression_tests() -> "Created already been compressed via Attachments API", { foreachx, - fun setup/1, fun teardown/2, + fun setup/1, + fun teardown/2, [{compressed, Fun} || Fun <- Funs] } }, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_not_create_compressed_att_with_deflate_encoding/1, fun should_not_create_compressed_att_with_compress_encoding/1, @@ -155,7 +161,8 @@ created_attachments_compression_tests(Mod, Funs) -> "Compressiable attachments", { foreachx, - fun setup/1, fun teardown/2, + fun setup/1, + fun teardown/2, [{{text, Mod}, Fun} || Fun <- Funs] } }, @@ -163,14 +170,13 @@ created_attachments_compression_tests(Mod, Funs) -> "Uncompressiable attachments", { foreachx, - fun setup/1, fun teardown/2, + fun setup/1, + fun teardown/2, [{{binary, Mod}, Fun} || Fun <- Funs] } } ]. - - should_upload_attachment_without_md5({Host, DbName}) -> ?_test(begin AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), @@ -238,9 +244,12 @@ should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) -> AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), AttData = <<"We all live in a yellow submarine!">>, <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), - "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)), - "\r\n\r\n"], + Body = [ + chunked_body([Part1, Part2]), + "Content-MD5: ", + base64:encode(couch_hash:md5_hash(AttData)), + "\r\n\r\n" + ], Headers = [ {"Content-Type", "text/plain"}, {"Host", Host}, @@ -264,11 +273,12 @@ should_reject_attachment_with_invalid_md5({Host, DbName}) -> ], {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), ?assertEqual(400, Code), - ?assertEqual(<<"content_md5_mismatch">>, - get_json(Json, [<<"error">>])) + ?assertEqual( + <<"content_md5_mismatch">>, + get_json(Json, [<<"error">>]) + ) end). 
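The chunked uploads above build on a chunked_body/1 helper defined further down in this module; what follows is an assumption of its shape, standard HTTP/1.1 chunked encoding closed by a zero-length chunk so the caller can append the Content-MD5 trailer afterwards:

```
%% assumed shape of the helper: "<hex size>\r\n<data>\r\n" per chunk,
%% terminated by "0\r\n"; the trailer and final "\r\n\r\n" are added
%% by the calling test.
chunked_body(Chunks) ->
    MkChunk = fun(Chunk) ->
        [io_lib:format("~.16b", [byte_size(Chunk)]), "\r\n", Chunk, "\r\n"]
    end,
    iolist_to_binary([lists:map(MkChunk, Chunks), "0\r\n"]).
```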
- should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) -> ?_test(begin AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), @@ -283,8 +293,10 @@ should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) -> ], {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), ?assertEqual(400, Code), - ?assertEqual(<<"content_md5_mismatch">>, - get_json(Json, [<<"error">>])) + ?assertEqual( + <<"content_md5_mismatch">>, + get_json(Json, [<<"error">>]) + ) end). should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) -> @@ -292,9 +304,12 @@ should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) -> AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), AttData = <<"We all live in a yellow submarine!">>, <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), - "Content-MD5: ", base64:encode(<<"foobar!">>), - "\r\n\r\n"], + Body = [ + chunked_body([Part1, Part2]), + "Content-MD5: ", + base64:encode(<<"foobar!">>), + "\r\n\r\n" + ], Headers = [ {"Content-Type", "text/plain"}, {"Host", Host}, @@ -317,7 +332,8 @@ should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) -> should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) -> ?_test(begin {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "gzip"}]), + AttUrl, [{"Accept-Encoding", "gzip"}] + ), ?assertEqual(200, Code), ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)), ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body))) @@ -325,7 +341,8 @@ should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) -> should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) -> ?_test(begin {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "gzip"}]), + AttUrl, [{"Accept-Encoding", "gzip"}] + ), ?assertEqual(200, Code), ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)), ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body))) @@ -333,77 +350,98 @@ should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) -> should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) -> ?_test(begin {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "gzip"}]), + AttUrl, [{"Accept-Encoding", "gzip"}] + ), ?assertEqual(200, Code), - ?assertEqual(undefined, - couch_util:get_value("Content-Encoding", Headers)), + ?assertEqual( + undefined, + couch_util:get_value("Content-Encoding", Headers) + ), ?assertEqual(Data, iolist_to_binary(Body)) end). should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) -> ?_test(begin {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "deflate"}]), + AttUrl, [{"Accept-Encoding", "deflate"}] + ), ?assertEqual(200, Code), - ?assertEqual(undefined, - couch_util:get_value("Content-Encoding", Headers)), + ?assertEqual( + undefined, + couch_util:get_value("Content-Encoding", Headers) + ), ?assertEqual(Data, iolist_to_binary(Body)) end). should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) -> - ?_assertEqual(406, + ?_assertEqual( + 406, begin {ok, Code, _, _} = test_request:get( - AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]), + AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}] + ), Code - end). + end + ). 
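The gzip assertions above rely on a simple inverse relation, shown here in isolation: zlib:gunzip/1 restores exactly the bytes that were gzip-compressed:

```
Data = <<"We all live in a yellow submarine!">>,
Data = zlib:gunzip(zlib:gzip(Data)).
```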
should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) -> ?_test(begin Url = DocUrl ++ "?attachments=true", {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), + Url, [{"Accept", "application/json"}] + ), ?assertEqual(200, Code), Json = jiffy:decode(Body), AttJson = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + Json, [<<"_attachments">>, ?ATT_TXT_NAME] + ), AttData = couch_util:get_nested_json_value( - AttJson, [<<"data">>]), + AttJson, [<<"data">>] + ), ?assertEqual( <<"text/plain">>, - couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), + couch_util:get_nested_json_value(AttJson, [<<"content_type">>]) + ), ?assertEqual(Data, base64:decode(AttData)) end); should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) -> ?_test(begin Url = DocUrl ++ "?attachments=true", {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), + Url, [{"Accept", "application/json"}] + ), ?assertEqual(200, Code), Json = jiffy:decode(Body), AttJson = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + Json, [<<"_attachments">>, ?ATT_TXT_NAME] + ), AttData = couch_util:get_nested_json_value( - AttJson, [<<"data">>]), + AttJson, [<<"data">>] + ), ?assertEqual( <<"text/plain">>, - couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), + couch_util:get_nested_json_value(AttJson, [<<"content_type">>]) + ), ?assertEqual(Data, base64:decode(AttData)) end); should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) -> ?_test(begin Url = DocUrl ++ "?attachments=true", {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), + Url, [{"Accept", "application/json"}] + ), ?assertEqual(200, Code), Json = jiffy:decode(Body), AttJson = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_BIN_NAME]), + Json, [<<"_attachments">>, ?ATT_BIN_NAME] + ), AttData = couch_util:get_nested_json_value( - AttJson, [<<"data">>]), + AttJson, [<<"data">>] + ), ?assertEqual( <<"image/png">>, - couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), + couch_util:get_nested_json_value(AttJson, [<<"content_type">>]) + ), ?assertEqual(Data, base64:decode(AttData)) end). 
@@ -411,13 +449,17 @@ should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) -> ?_test(begin Url = DocUrl ++ "?att_encoding_info=true", {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), + Url, [{"Accept", "application/json"}] + ), ?assertEqual(200, Code), Json = jiffy:decode(Body), {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - ?assertEqual(<<"gzip">>, - couch_util:get_value(<<"encoding">>, AttJson)), + Json, [<<"_attachments">>, ?ATT_TXT_NAME] + ), + ?assertEqual( + <<"gzip">>, + couch_util:get_value(<<"encoding">>, AttJson) + ), AttLength = couch_util:get_value(<<"length">>, AttJson), EncLength = couch_util:get_value(<<"encoded_length">>, AttJson), ?assertEqual(AttLength, EncLength), @@ -427,38 +469,55 @@ should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) -> ?_test(begin Url = DocUrl ++ "?att_encoding_info=true", {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), + Url, [{"Accept", "application/json"}] + ), ?assertEqual(200, Code), Json = jiffy:decode(Body), {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - ?assertEqual(<<"gzip">>, - couch_util:get_value(<<"encoding">>, AttJson)), + Json, [<<"_attachments">>, ?ATT_TXT_NAME] + ), + ?assertEqual( + <<"gzip">>, + couch_util:get_value(<<"encoding">>, AttJson) + ), AttEncLength = iolist_size(gzip(Data)), - ?assertEqual(AttEncLength, - couch_util:get_value(<<"encoded_length">>, AttJson)), - ?assertEqual(byte_size(Data), - couch_util:get_value(<<"length">>, AttJson)) + ?assertEqual( + AttEncLength, + couch_util:get_value(<<"encoded_length">>, AttJson) + ), + ?assertEqual( + byte_size(Data), + couch_util:get_value(<<"length">>, AttJson) + ) end); should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) -> ?_test(begin Url = DocUrl ++ "?att_encoding_info=true", {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), + Url, [{"Accept", "application/json"}] + ), ?assertEqual(200, Code), Json = jiffy:decode(Body), {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_BIN_NAME]), - ?assertEqual(undefined, - couch_util:get_value(<<"encoding">>, AttJson)), - ?assertEqual(undefined, - couch_util:get_value(<<"encoded_length">>, AttJson)), - ?assertEqual(byte_size(Data), - couch_util:get_value(<<"length">>, AttJson)) + Json, [<<"_attachments">>, ?ATT_BIN_NAME] + ), + ?assertEqual( + undefined, + couch_util:get_value(<<"encoding">>, AttJson) + ), + ?assertEqual( + undefined, + couch_util:get_value(<<"encoded_length">>, AttJson) + ), + ?assertEqual( + byte_size(Data), + couch_util:get_value(<<"length">>, AttJson) + ) end). should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) -> - ?_assertEqual(415, + ?_assertEqual( + 415, begin HttpHost = "http://" ++ Host, AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"), @@ -470,14 +529,16 @@ should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) -> ], {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body), Code - end). + end + ). should_not_create_compressed_att_with_compress_encoding({Host, DbName}) -> % Note: As of OTP R13B04, it seems there's no LZW compression % (i.e. UNIX compress utility implementation) lib in OTP. 
% However there's a simple working Erlang implementation at: % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php - ?_assertEqual(415, + ?_assertEqual( + 415, begin HttpHost = "http://" ++ Host, AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"), @@ -488,33 +549,42 @@ should_not_create_compressed_att_with_compress_encoding({Host, DbName}) -> ], {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data), Code - end). + end + ). should_create_compressible_att_with_ctype_params({Host, DbName}) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - HttpHost = "http://" ++ Host, - DocUrl = string:join([HttpHost, DbName, ?docid()], "/"), - AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"), - {ok, Data} = file:read_file(?FIXTURE_TXT), - Headers = [{"Content-Type", "text/plain; charset=UTF-8"}], - {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data), - ?assertEqual(201, Code0), - - {ok, Code1, _, Body} = test_request:get( - DocUrl ++ "?att_encoding_info=true"), - ?assertEqual(200, Code1), - Json = jiffy:decode(Body), - {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - ?assertEqual(<<"gzip">>, - couch_util:get_value(<<"encoding">>, AttJson)), - AttEncLength = iolist_size(gzip(Data)), - ?assertEqual(AttEncLength, - couch_util:get_value(<<"encoded_length">>, AttJson)), - ?assertEqual(byte_size(Data), - couch_util:get_value(<<"length">>, AttJson)) - end)}. - + {timeout, ?TIMEOUT_EUNIT, + ?_test(begin + HttpHost = "http://" ++ Host, + DocUrl = string:join([HttpHost, DbName, ?docid()], "/"), + AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"), + {ok, Data} = file:read_file(?FIXTURE_TXT), + Headers = [{"Content-Type", "text/plain; charset=UTF-8"}], + {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data), + ?assertEqual(201, Code0), + + {ok, Code1, _, Body} = test_request:get( + DocUrl ++ "?att_encoding_info=true" + ), + ?assertEqual(200, Code1), + Json = jiffy:decode(Body), + {AttJson} = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_TXT_NAME] + ), + ?assertEqual( + <<"gzip">>, + couch_util:get_value(<<"encoding">>, AttJson) + ), + AttEncLength = iolist_size(gzip(Data)), + ?assertEqual( + AttEncLength, + couch_util:get_value(<<"encoded_length">>, AttJson) + ), + ?assertEqual( + byte_size(Data), + couch_util:get_value(<<"length">>, AttJson) + ) + end)}. compact_after_lowering_attachment_size_limit_test_() -> { @@ -539,33 +609,33 @@ compact_after_lowering_attachment_size_limit_test_() -> } }. - should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc1 = #doc{id = <<"doc1">>, atts = att(1000)}, - {ok, _} = couch_db:update_doc(Db1, Doc1, []), - couch_db:close(Db1), - config:set("couchdb", "max_attachment_size", "1", _Persist = false), - compact_db(DbName), - {ok, Db2} = couch_db:open_int(DbName, []), - {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>), - couch_db:close(Db2), - [Att] = Doc2#doc.atts, - ?assertEqual(1000, couch_att:fetch(att_len, Att)) - end)}. 
- + {timeout, ?TIMEOUT_EUNIT, + ?_test(begin + {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]), + Doc1 = #doc{id = <<"doc1">>, atts = att(1000)}, + {ok, _} = couch_db:update_doc(Db1, Doc1, []), + couch_db:close(Db1), + config:set("couchdb", "max_attachment_size", "1", _Persist = false), + compact_db(DbName), + {ok, Db2} = couch_db:open_int(DbName, []), + {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>), + couch_db:close(Db2), + [Att] = Doc2#doc.atts, + ?assertEqual(1000, couch_att:fetch(att_len, Att)) + end)}. att(Size) when is_integer(Size), Size >= 1 -> - [couch_att:new([ - {name, <<"att">>}, - {type, <<"app/binary">>}, - {att_len, Size}, - {data, fun(_Bytes) -> - << <<"x">> || _ <- lists:seq(1, Size) >> - end} - ])]. - + [ + couch_att:new([ + {name, <<"att">>}, + {type, <<"app/binary">>}, + {att_len, Size}, + {data, fun(_Bytes) -> + <<<<"x">> || _ <- lists:seq(1, Size)>> + end} + ]) + ]. compact_db(DbName) -> {ok, Db} = couch_db:open_int(DbName, []), @@ -573,34 +643,35 @@ compact_db(DbName) -> wait_compaction(DbName, "database", ?LINE), ok = couch_db:close(Db). - wait_compaction(DbName, Kind, Line) -> WaitFun = fun() -> - case is_compaction_running(DbName) of - true -> wait; - false -> ok - end + case is_compaction_running(DbName) of + true -> wait; + false -> ok + end end, case test_util:wait(WaitFun, ?TIMEOUT) of timeout -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, Line}, - {reason, "Timeout waiting for " - ++ Kind - ++ " database compaction"}]}); + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, + {line, Line}, + {reason, + "Timeout waiting for " ++ + Kind ++ + " database compaction"} + ]} + ); _ -> ok end. - is_compaction_running(DbName) -> {ok, Db} = couch_db:open_int(DbName, []), {ok, DbInfo} = couch_db:get_db_info(Db), couch_db:close(Db), couch_util:get_value(compact_running, DbInfo) =:= true. - internal_replication_after_lowering_attachment_size_limit_test_() -> { "Internal replication after lowering max attachment size", @@ -629,23 +700,23 @@ internal_replication_after_lowering_attachment_size_limit_test_() -> }. should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]), - SrcDoc = #doc{id = <<"doc">>, atts = att(1000)}, - {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []), - couch_db:close(SrcDb), - config:set("couchdb", "max_attachment_size", "1", _Persist = false), - % Create a pair of "fake" shards - SrcShard = #shard{name = SrcName, node = node()}, - TgtShard = #shard{name = TgtName, node = node()}, - mem3_rep:go(SrcShard, TgtShard, []), - {ok, TgtDb} = couch_db:open_int(TgtName, []), - {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>), - couch_db:close(TgtDb), - [Att] = TgtDoc#doc.atts, - ?assertEqual(1000, couch_att:fetch(att_len, Att)) - end)}. - + {timeout, ?TIMEOUT_EUNIT, + ?_test(begin + {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]), + SrcDoc = #doc{id = <<"doc">>, atts = att(1000)}, + {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []), + couch_db:close(SrcDb), + config:set("couchdb", "max_attachment_size", "1", _Persist = false), + % Create a pair of "fake" shards + SrcShard = #shard{name = SrcName, node = node()}, + TgtShard = #shard{name = TgtName, node = node()}, + mem3_rep:go(SrcShard, TgtShard, []), + {ok, TgtDb} = couch_db:open_int(TgtName, []), + {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>), + couch_db:close(TgtDb), + [Att] = TgtDoc#doc.atts, + ?assertEqual(1000, couch_att:fetch(att_len, Att)) + end)}. 
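Note that `att/1` above builds its test payload with a nested binary comprehension; erlfmt only tightens the spacing (`<<<<"x">> || ...>>`), the construct itself is unchanged. In isolation:

```
%% Sketch: an N-byte payload of "x" bytes via a binary comprehension.
payload(N) when is_integer(N), N >= 1 ->
    << <<"x">> || _ <- lists:seq(1, N) >>.
```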
get_json(Json, Path) -> couch_util:get_nested_json_value(Json, Path). @@ -684,12 +755,15 @@ bind_address() -> request(Method, Url, Headers, Body) -> RequestHead = [Method, " ", Url, " HTTP/1.1"], - RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"] - || {Key, Value} <- Headers], + RequestHeaders = [ + [string:join([Key, Value], ": "), "\r\n"] + || {Key, Value} <- Headers + ], Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body], Sock = get_socket(), gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))), - timer:sleep(?TIMEWAIT), % must wait to receive complete response + % must wait to receive complete response + timer:sleep(?TIMEWAIT), {ok, R} = gen_tcp:recv(Sock, 0), gen_tcp:close(Sock), [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]), @@ -702,7 +776,8 @@ create_standalone_text_att(Host, DbName) -> {ok, Data} = file:read_file(?FIXTURE_TXT), Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"), {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "text/plain"}], Data), + Url, [{"Content-Type", "text/plain"}], Data + ), ?assertEqual(201, Code), Url. @@ -710,39 +785,48 @@ create_standalone_png_att(Host, DbName) -> {ok, Data} = file:read_file(?FIXTURE_PNG), Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"), {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "image/png"}], Data), + Url, [{"Content-Type", "image/png"}], Data + ), ?assertEqual(201, Code), Url. create_inline_text_att(Host, DbName) -> {ok, Data} = file:read_file(?FIXTURE_TXT), Url = string:join([Host, DbName, "doc"], "/"), - Doc = {[ - {<<"_attachments">>, {[ - {?ATT_TXT_NAME, {[ - {<<"content_type">>, <<"text/plain">>}, - {<<"data">>, base64:encode(Data)} - ]} - }]}} - ]}, + Doc = + {[ + {<<"_attachments">>, + {[ + {?ATT_TXT_NAME, + {[ + {<<"content_type">>, <<"text/plain">>}, + {<<"data">>, base64:encode(Data)} + ]}} + ]}} + ]}, {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)), + Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc) + ), ?assertEqual(201, Code), string:join([Url, ?b2l(?ATT_TXT_NAME)], "/"). create_inline_png_att(Host, DbName) -> {ok, Data} = file:read_file(?FIXTURE_PNG), Url = string:join([Host, DbName, "doc"], "/"), - Doc = {[ - {<<"_attachments">>, {[ - {?ATT_BIN_NAME, {[ - {<<"content_type">>, <<"image/png">>}, - {<<"data">>, base64:encode(Data)} - ]} - }]}} - ]}, + Doc = + {[ + {<<"_attachments">>, + {[ + {?ATT_BIN_NAME, + {[ + {<<"content_type">>, <<"image/png">>}, + {<<"data">>, base64:encode(Data)} + ]}} + ]}} + ]}, {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)), + Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc) + ), ?assertEqual(201, Code), string:join([Url, ?b2l(?ATT_BIN_NAME)], "/"). @@ -750,8 +834,10 @@ create_already_compressed_att(Host, DbName) -> {ok, Data} = file:read_file(?FIXTURE_TXT), Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"), {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}], - zlib:gzip(Data)), + Url, + [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}], + zlib:gzip(Data) + ), ?assertEqual(201, Code), Url. 
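`request/4` above sleeps for ?TIMEWAIT and then issues a single recv, which is fragile under load. A hedged alternative (not in the commit) that drains the socket until the peer closes or goes quiet, using only gen_tcp:

```
%% Hypothetical variant: accumulate response chunks until the socket
%% closes or times out, instead of relying on one fixed sleep.
recv_all(Sock) ->
    recv_all(Sock, []).

recv_all(Sock, Acc) ->
    case gen_tcp:recv(Sock, 0, 1000) of
        {ok, Chunk} -> recv_all(Sock, [Chunk | Acc]);
        {error, _} -> iolist_to_binary(lists:reverse(Acc))
    end.
```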
diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl index 19d32d0c5..dfb22dc25 100644 --- a/src/couch/test/eunit/couchdb_auth_tests.erl +++ b/src/couch/test/eunit/couchdb_auth_tests.erl @@ -14,23 +14,21 @@ -include_lib("couch/include/couch_eunit.hrl"). - setup(PortType) -> Hashed = couch_passwords:hash_admin_password("artischocko"), - ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist=false), + ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist = false), Addr = config:get("httpd", "bind_address", "127.0.0.1"), lists:concat(["http://", Addr, ":", port(PortType), "/_session"]). setup_require_valid_user(PortType) -> - ok = config:set("chttpd", "require_valid_user", "true", _Persist=false), + ok = config:set("chttpd", "require_valid_user", "true", _Persist = false), setup(PortType). teardown(_, _) -> ok. teardown_require_valid_user(_, _) -> - config:set("chttpd", "require_valid_user", "false", _Persist=false). - + config:set("chttpd", "require_valid_user", "false", _Persist = false). auth_test_() -> Tests = [ @@ -46,7 +44,8 @@ auth_test_() -> "Auth tests", { setup, - fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1, + fun() -> test_util:start_couch([chttpd]) end, + fun test_util:stop_couch/1, [ make_test_cases(clustered, Tests), make_test_cases(backdoor, Tests), @@ -64,49 +63,67 @@ make_test_cases(Mod, Funs) -> make_require_valid_user_test_cases(Mod, Funs) -> { lists:flatten(io_lib:format("~s require_valid_user=true", [Mod])), - {foreachx, fun setup_require_valid_user/1, fun teardown_require_valid_user/2, - [{Mod, Fun} || Fun <- Funs]} + {foreachx, fun setup_require_valid_user/1, fun teardown_require_valid_user/2, [ + {Mod, Fun} + || Fun <- Funs + ]} }. should_return_username_on_post_to_session(_PortType, Url) -> - ?_assertEqual(<<"rocko">>, + ?_assertEqual( + <<"rocko">>, begin Hashed = couch_passwords:hash_admin_password(<<"artischocko">>), ok = config:set("admins", "rocko", binary_to_list(Hashed), false), - {ok, _, _, Body} = test_request:post(Url, [{"Content-Type", "application/json"}], - "{\"name\":\"rocko\", \"password\":\"artischocko\"}"), + {ok, _, _, Body} = test_request:post( + Url, + [{"Content-Type", "application/json"}], + "{\"name\":\"rocko\", \"password\":\"artischocko\"}" + ), {Json} = jiffy:decode(Body), proplists:get_value(<<"name">>, Json) - end). + end + ). should_not_return_authenticated_field(_PortType, Url) -> - ?_assertThrow({not_found, _}, + ?_assertThrow( + {not_found, _}, begin couch_util:get_nested_json_value(session(Url), [ - <<"info">>, <<"authenticated">>]) - end). + <<"info">>, <<"authenticated">> + ]) + end + ). should_return_list_of_handlers(backdoor, Url) -> - ?_assertEqual([<<"cookie">>,<<"default">>], + ?_assertEqual( + [<<"cookie">>, <<"default">>], begin couch_util:get_nested_json_value(session(Url), [ - <<"info">>, <<"authentication_handlers">>]) - end); + <<"info">>, <<"authentication_handlers">> + ]) + end + ); should_return_list_of_handlers(clustered, Url) -> - ?_assertEqual([<<"cookie">>,<<"default">>], + ?_assertEqual( + [<<"cookie">>, <<"default">>], begin couch_util:get_nested_json_value(session(Url), [ - <<"info">>, <<"authentication_handlers">>]) - end). - + <<"info">>, <<"authentication_handlers">> + ]) + end + ). 
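`make_require_valid_user_test_cases/2` above uses EUnit's `foreachx` fixture, which erlfmt reflows aggressively. The shape in miniature, as a self-contained sketch: the setup callback receives the X value, and each instantiator receives both X and setup's result.

```
-include_lib("eunit/include/eunit.hrl").

%% Sketch of the foreachx fixture shape used by these suites.
foreachx_shape_test_() ->
    {foreachx,
        fun(X) -> {started, X} end,
        fun(_X, _Result) -> ok end,
        [{clustered, fun(X, Result) ->
            ?_assertEqual({started, X}, Result)
        end}]}.
```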
%% ------------------------------------------------------------------ %% Internal Function Definitions %% ------------------------------------------------------------------ session(Url) -> - {ok, _, _, Body} = test_request:get(Url, [{"Content-Type", "application/json"}], - "{\"name\":\"rocko\", \"password\":\"artischocko\"}"), + {ok, _, _, Body} = test_request:get( + Url, + [{"Content-Type", "application/json"}], + "{\"name\":\"rocko\", \"password\":\"artischocko\"}" + ), jiffy:decode(Body). port(clustered) -> diff --git a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl index c46352f35..17c41dafe 100755 --- a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl +++ b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl @@ -21,7 +21,7 @@ setup() -> Ctx = test_util:start_couch([chttpd]), Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false), Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(chttpd, port), Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])), @@ -30,7 +30,7 @@ setup() -> {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}. teardown({ok, _, _, _, Ctx}) -> - ok = config:delete("admins", ?USER, _Persist=false), + ok = config:delete("admins", ?USER, _Persist = false), test_util:stop_couch(Ctx). cookie_test_() -> @@ -52,8 +52,12 @@ cookie_test_() -> should_set_cookie_domain(Url, ContentType, Payload) -> ?_test(begin - ok = config:set("couch_httpd_auth", "cookie_domain", - "example.com", false), + ok = config:set( + "couch_httpd_auth", + "cookie_domain", + "example.com", + false + ), {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), ?assertEqual(200, Code), Cookie = proplists:get_value("Set-Cookie", Headers), @@ -71,8 +75,12 @@ should_not_set_cookie_domain(Url, ContentType, Payload) -> should_delete_cookie_domain(Url, ContentType, Payload) -> ?_test(begin - ok = config:set("couch_httpd_auth", "cookie_domain", - "example.com", false), + ok = config:set( + "couch_httpd_auth", + "cookie_domain", + "example.com", + false + ), {ok, Code, Headers, _} = test_request:delete(Url, ContentType, Payload), ?assertEqual(200, Code), Cookie = proplists:get_value("Set-Cookie", Headers), diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl index 8ec61cc8a..dce07fd28 100644 --- a/src/couch/test/eunit/couchdb_cors_tests.erl +++ b/src/couch/test/eunit/couchdb_cors_tests.erl @@ -20,10 +20,12 @@ -define(TIMEOUT, 1000). -define(_assertEqualLists(A, B), - ?_assertEqual(lists:usort(A), lists:usort(B))). + ?_assertEqual(lists:usort(A), lists:usort(B)) +). -define(assertEqualLists(A, B), - ?assertEqual(lists:usort(A), lists:usort(B))). + ?assertEqual(lists:usort(A), lists:usort(B)) +). start() -> Ctx = test_util:start_couch([ioq]), @@ -46,14 +48,16 @@ setup() -> setup({Mod, VHost}) -> {Host, DbName} = setup(), - Url = case Mod of - server -> - Host; - db -> - Host ++ "/" ++ DbName - end, - DefaultHeaders = [{"Origin", "http://example.com"}] - ++ maybe_append_vhost(VHost), + Url = + case Mod of + server -> + Host; + db -> + Host ++ "/" ++ DbName + end, + DefaultHeaders = + [{"Origin", "http://example.com"}] ++ + maybe_append_vhost(VHost), {Host, DbName, Url, DefaultHeaders}. 
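The cookie_domain tests above assert on the Set-Cookie header returned by `_session`. A small sketch of the Domain check with the stdlib `string` module (string:find/2 assumes OTP 20+):

```
%% Sketch: true if a Set-Cookie value carries the given Domain.
has_cookie_domain(SetCookie, Domain) ->
    string:find(SetCookie, "Domain=" ++ Domain) =/= nomatch.
```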
teardown(DbName) when is_list(DbName) -> @@ -65,7 +69,6 @@ teardown({_, DbName}) -> teardown(_, {_, DbName, _, _}) -> teardown(DbName). - cors_test_() -> Funs = [ fun should_not_allow_origin/2, @@ -85,7 +88,8 @@ cors_test_() -> "CORS (COUCHDB-431)", { setup, - fun start/0, fun test_util:stop_couch/1, + fun start/0, + fun test_util:stop_couch/1, [ cors_tests(Funs), vhost_cors_tests(Funs), @@ -99,7 +103,8 @@ headers_tests() -> "Various headers tests", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_not_return_cors_headers_for_invalid_origin/1, fun should_not_return_cors_headers_for_invalid_origin_preflight/1, @@ -130,206 +135,288 @@ vhost_cors_tests(Funs) -> make_test_case(Mod, UseVhost, Funs) -> { - case Mod of server -> "Server"; db -> "Database" end, - {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun} - || Fun <- Funs]} + case Mod of + server -> "Server"; + db -> "Database" + end, + {foreachx, fun setup/1, fun teardown/2, [ + {{Mod, UseVhost}, Fun} + || Fun <- Funs + ]} }. - should_not_allow_origin(_, {_, _, Url, Headers0}) -> - ?_assertEqual(undefined, + ?_assertEqual( + undefined, begin config:delete("cors", "origins", false), Headers1 = proplists:delete("Origin", Headers0), - Headers = [{"Origin", "http://127.0.0.1"}] - ++ Headers1, + Headers = + [{"Origin", "http://127.0.0.1"}] ++ + Headers1, {ok, _, Resp, _} = test_request:get(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual(undefined, + ?_assertEqual( + undefined, begin - Headers = [{"Origin", "http://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), + Headers = + [ + {"Origin", "http://example.com:5984"}, + {"Access-Control-Request-Method", "GET"} + ] ++ + maybe_append_vhost(VHost), {ok, _, Resp, _} = test_request:options(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual(undefined, + ?_assertEqual( + undefined, begin - Headers = [{"Origin", "http://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), + Headers = + [ + {"Origin", "http://example.com:5984"}, + {"Access-Control-Request-Method", "GET"} + ] ++ + maybe_append_vhost(VHost), {ok, _, Resp, _} = test_request:options(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_not_all_origin_due_case_mismatch({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual(undefined, + ?_assertEqual( + undefined, begin - Headers = [{"Origin", "http://ExAmPlE.CoM"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), + Headers = + [ + {"Origin", "http://ExAmPlE.CoM"}, + {"Access-Control-Request-Method", "GET"} + ] ++ + maybe_append_vhost(VHost), {ok, _, Resp, _} = test_request:options(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). 
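The three rejection tests above (port, scheme, and letter-case mismatch) all point at the same behavior: the Origin header is compared to the configured origins verbatim, so any textual difference fails. Reading that off the tests, the match is effectively:

```
%% Sketch inferred from the tests above: verbatim origin matching,
%% so "http://ExAmPlE.CoM" never matches a configured
%% "http://example.com", and neither does another port or scheme.
origin_allowed(Origin, ConfiguredOrigins) ->
    lists:member(Origin, ConfiguredOrigins).
```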
should_make_simple_request(_, {_, _, Url, DefaultHeaders}) -> ?_test(begin {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders), ?assertEqual( undefined, - proplists:get_value("Access-Control-Allow-Credentials", Resp)), + proplists:get_value("Access-Control-Allow-Credentials", Resp) + ), ?assertEqual( "http://example.com", - proplists:get_value("Access-Control-Allow-Origin", Resp)), + proplists:get_value("Access-Control-Allow-Origin", Resp) + ), ?assertEqualLists( ?COUCH_HEADERS ++ list_simple_headers(Resp), - split_list(proplists:get_value("Access-Control-Expose-Headers", Resp))) + split_list(proplists:get_value("Access-Control-Expose-Headers", Resp)) + ) end). should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqualLists(?SUPPORTED_METHODS, + ?_assertEqualLists( + ?SUPPORTED_METHODS, begin - Headers = DefaultHeaders - ++ [{"Access-Control-Request-Method", "GET"}], + Headers = + DefaultHeaders ++ + [{"Access-Control-Request-Method", "GET"}], {ok, _, Resp, _} = test_request:options(Url, Headers), split_list(proplists:get_value("Access-Control-Allow-Methods", Resp)) - end). + end + ). should_make_prefligh_request_with_port({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual("http://example.com:5984", + ?_assertEqual( + "http://example.com:5984", begin - config:set("cors", "origins", "http://example.com:5984", - false), - Headers = [{"Origin", "http://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), + config:set( + "cors", + "origins", + "http://example.com:5984", + false + ), + Headers = + [ + {"Origin", "http://example.com:5984"}, + {"Access-Control-Request-Method", "GET"} + ] ++ + maybe_append_vhost(VHost), {ok, _, Resp, _} = test_request:options(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_make_prefligh_request_with_scheme({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual("https://example.com:5984", + ?_assertEqual( + "https://example.com:5984", begin - config:set("cors", "origins", "https://example.com:5984", - false), - Headers = [{"Origin", "https://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), + config:set( + "cors", + "origins", + "https://example.com:5984", + false + ), + Headers = + [ + {"Origin", "https://example.com:5984"}, + {"Access-Control-Request-Method", "GET"} + ] ++ + maybe_append_vhost(VHost), {ok, _, Resp, _} = test_request:options(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_make_prefligh_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual("https://example.com:5984", + ?_assertEqual( + "https://example.com:5984", begin config:set("cors", "origins", "*", false), - Headers = [{"Origin", "https://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), + Headers = + [ + {"Origin", "https://example.com:5984"}, + {"Access-Control-Request-Method", "GET"} + ] ++ + maybe_append_vhost(VHost), {ok, _, Resp, _} = test_request:options(Url, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqual("true", + ?_assertEqual( + "true", begin ok = config:set("cors", "credentials", "true", false), {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders), proplists:get_value("Access-Control-Allow-Credentials", Resp) - end). + end + ). 
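For comparison with the test_request calls above, the same preflight can be issued with OTP's bundled httpc (inets must be started; note that httpc lower-cases response header names):

```
%% Hypothetical client-side sketch of a CORS preflight.
preflight(Url, Origin) ->
    {ok, {{_, 200, _}, Headers, _Body}} =
        httpc:request(options,
            {Url, [
                {"Origin", Origin},
                {"Access-Control-Request-Method", "GET"}
            ]},
            [], []),
    proplists:get_value("access-control-allow-methods", Headers).
```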
should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqual("http://example.com", + ?_assertEqual( + "http://example.com", begin Hashed = couch_passwords:hash_admin_password(<<"test">>), config:set("admins", "test", ?b2l(Hashed), false), {ok, _, Resp, _} = test_request:get( - Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]), + Url, DefaultHeaders, [{basic_auth, {"test", "test"}}] + ), config:delete("admins", "test", false), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqualLists(?SUPPORTED_METHODS, + ?_assertEqualLists( + ?SUPPORTED_METHODS, begin Hashed = couch_passwords:hash_admin_password(<<"test">>), config:set("admins", "test", ?b2l(Hashed), false), - Headers = DefaultHeaders - ++ [{"Access-Control-Request-Method", "GET"}], + Headers = + DefaultHeaders ++ + [{"Access-Control-Request-Method", "GET"}], {ok, _, Resp, _} = test_request:options( - Url, Headers, [{basic_auth, {"test", "test"}}]), + Url, Headers, [{basic_auth, {"test", "test"}}] + ), config:delete("admins", "test", false), split_list(proplists:get_value("Access-Control-Allow-Methods", Resp)) - end). + end + ). should_not_return_cors_headers_for_invalid_origin({Host, _}) -> - ?_assertEqual(undefined, + ?_assertEqual( + undefined, begin Headers = [{"Origin", "http://127.0.0.1"}], {ok, _, Resp, _} = test_request:get(Host, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) -> - ?_assertEqual(undefined, + ?_assertEqual( + undefined, begin - Headers = [{"Origin", "http://127.0.0.1"}, - {"Access-Control-Request-Method", "GET"}], + Headers = [ + {"Origin", "http://127.0.0.1"}, + {"Access-Control-Request-Method", "GET"} + ], {ok, _, Resp, _} = test_request:options(Host, Headers), proplists:get_value("Access-Control-Allow-Origin", Resp) - end). + end + ). should_make_request_against_attachment({Host, DbName}) -> {"COUCHDB-1689", - ?_assertEqual(200, - begin - Url = Host ++ "/" ++ DbName, - {ok, Code0, _, _} = test_request:put( - Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}], - "hello, couch!"), - ?assert(Code0 =:= 201), - {ok, Code, _, _} = test_request:get( - Url ++ "/doc?attachments=true", - [{"Origin", "http://example.com"}]), - Code - end)}. + ?_assertEqual( + 200, + begin + Url = Host ++ "/" ++ DbName, + {ok, Code0, _, _} = test_request:put( + Url ++ "/doc/file.txt", + [{"Content-Type", "text/plain"}], + "hello, couch!" + ), + ?assert(Code0 =:= 201), + {ok, Code, _, _} = test_request:get( + Url ++ "/doc?attachments=true", + [{"Origin", "http://example.com"}] + ), + Code + end + )}. should_make_range_request_against_attachment({Host, DbName}) -> {"COUCHDB-1689", - ?_assertEqual(206, - begin - Url = Host ++ "/" ++ DbName, - {ok, Code0, _, _} = test_request:put( - Url ++ "/doc/file.txt", - [{"Content-Type", "application/octet-stream"}], - "hello, couch!"), - ?assert(Code0 =:= 201), - {ok, Code, _, _} = test_request:get( - Url ++ "/doc/file.txt", [{"Origin", "http://example.com"}, - {"Range", "bytes=0-6"}]), - Code - end)}. + ?_assertEqual( + 206, + begin + Url = Host ++ "/" ++ DbName, + {ok, Code0, _, _} = test_request:put( + Url ++ "/doc/file.txt", + [{"Content-Type", "application/octet-stream"}], + "hello, couch!" 
+ ), + ?assert(Code0 =:= 201), + {ok, Code, _, _} = test_request:get( + Url ++ "/doc/file.txt", [ + {"Origin", "http://example.com"}, + {"Range", "bytes=0-6"} + ] + ), + Code + end + )}. should_make_request_with_if_none_match_header({Host, DbName}) -> {"COUCHDB-1697", - ?_assertEqual(304, - begin - Url = Host ++ "/" ++ DbName, - {ok, Code0, Headers0, _} = test_request:put( - Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"), - ?assert(Code0 =:= 201), - ETag = proplists:get_value("ETag", Headers0), - {ok, Code, _, _} = test_request:get( - Url ++ "/doc", [{"Origin", "http://example.com"}, - {"If-None-Match", ETag}]), - Code - end)}. - + ?_assertEqual( + 304, + begin + Url = Host ++ "/" ++ DbName, + {ok, Code0, Headers0, _} = test_request:put( + Url ++ "/doc", [{"Content-Type", "application/json"}], "{}" + ), + ?assert(Code0 =:= 201), + ETag = proplists:get_value("ETag", Headers0), + {ok, Code, _, _} = test_request:get( + Url ++ "/doc", [ + {"Origin", "http://example.com"}, + {"If-None-Match", ETag} + ] + ), + Code + end + )}. maybe_append_vhost(true) -> [{"Host", "http://example.com"}]; diff --git a/src/couch/test/eunit/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl index 338f2cd3c..2f6993576 100644 --- a/src/couch/test/eunit/couchdb_db_tests.erl +++ b/src/couch/test/eunit/couchdb_db_tests.erl @@ -21,12 +21,10 @@ setup() -> fabric:create_db(DbName), DbName. - teardown(DbName) -> (catch fabric:delete_db(DbName)), ok. - clustered_db_test_() -> { "Checking clustered db API", @@ -39,7 +37,8 @@ clustered_db_test_() -> "DB deletion", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_close_deleted_db/1, fun should_kill_caller_from_load_validation_funs_for_deleted_db/1 @@ -50,7 +49,6 @@ clustered_db_test_() -> } }. - should_close_deleted_db(DbName) -> ?_test(begin [#shard{name = ShardName} | _] = mem3:shards(DbName), @@ -60,7 +58,7 @@ should_close_deleted_db(DbName) -> fabric:delete_db(DbName), receive {'DOWN', MonitorRef, _Type, _Pid, _Info} -> - ok + ok after 2000 -> throw(timeout_error) end, @@ -71,8 +69,7 @@ should_close_deleted_db(DbName) -> end end), ?assertEqual([], ets:lookup(couch_server:couch_dbs(DbName), DbName)) - end). - + end). should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) -> ?_test(begin @@ -85,7 +82,7 @@ should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) -> {'DOWN', MonitorRef, _Type, _Pid, _Info} -> ok after 2000 -> - throw(timeout_error) + throw(timeout_error) end, ?assertError(database_does_not_exist, couch_db:load_validation_funs(Db)) end). diff --git a/src/couch/test/eunit/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl index 653a6cb17..c51d56f0b 100644 --- a/src/couch/test/eunit/couchdb_design_doc_tests.erl +++ b/src/couch/test/eunit/couchdb_design_doc_tests.erl @@ -25,21 +25,21 @@ setup() -> BaseUrl = "http://" ++ Addr ++ ":" ++ Port, {?b2l(DbName), BaseUrl}. - teardown({DbName, _}) -> couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), ok. 
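`should_close_deleted_db` above relies on monitoring the db pid and waiting for its 'DOWN' message. The pattern in isolation, with the demonitor flush that avoids leaking a late message:

```
%% Sketch: block until a process exits or a deadline passes.
wait_for_down(Pid, TimeoutMs) ->
    Ref = erlang:monitor(process, Pid),
    receive
        {'DOWN', Ref, process, Pid, Reason} -> {ok, Reason}
    after TimeoutMs ->
        erlang:demonitor(Ref, [flush]),
        {error, timeout}
    end.
```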
- design_list_test_() -> { "Check _list functionality", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_return_empty_when_plain_return/1, fun should_return_empty_when_no_docs/1 @@ -50,38 +50,50 @@ design_list_test_() -> should_return_empty_when_plain_return({DbName, BaseUrl}) -> ?_test(begin - ?assertEqual(<<>>, - query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view")) + ?assertEqual( + <<>>, + query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view") + ) end). should_return_empty_when_no_docs({DbName, BaseUrl}) -> ?_test(begin - ?assertEqual(<<>>, - query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view")) + ?assertEqual( + <<>>, + query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view") + ) end). create_design_doc(DbName, DDName) -> {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"simple_view">>, {[ - {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">> }, - {<<"reduce">>, <<"function (key, values, rereduce) {return sum(values);}">> } - ]}} - ]}}, - {<<"lists">>, {[ - {<<"plain_return">>, <<"function(head, req) {return;}">>}, - {<<"simple_render">>, <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>} - ]}} - ]}), + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, DDName}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, + {[ + {<<"simple_view">>, + {[ + {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">>}, + {<<"reduce">>, + <<"function (key, values, rereduce) {return sum(values);}">>} + ]}} + ]}}, + {<<"lists">>, + {[ + {<<"plain_return">>, <<"function(head, req) {return;}">>}, + {<<"simple_render">>, + <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>} + ]}} + ]} + ), {ok, Rev} = couch_db:update_doc(Db, DDoc, []), couch_db:close(Db), Rev. query_text(BaseUrl, DbName, DDoc, Path) -> {ok, Code, _Headers, Body} = test_request:get( - BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path), + BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path + ), ?assertEqual(200, Code), Body. diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl index 77250337c..75bf18a12 100644 --- a/src/couch/test/eunit/couchdb_file_compression_tests.erl +++ b/src/couch/test/eunit/couchdb_file_compression_tests.erl @@ -25,26 +25,27 @@ setup_all() -> DbName = ?tempdb(), {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), ok = populate_db(Db, ?DOCS_COUNT), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?DDOC_ID}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"by_id">>, {[ - {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>} + DDoc = couch_doc:from_json_obj( + {[ + {<<"_id">>, ?DDOC_ID}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, + {[ + {<<"by_id">>, + {[ + {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>} + ]}} ]}} - ]} - } - ]}), + ]} + ), {ok, _} = couch_db:update_doc(Db, DDoc, []), ok = couch_db:close(Db), {Ctx, DbName}. - teardown_all({Ctx, DbName}) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]), test_util:stop_couch(Ctx). - couch_file_compression_test_() -> { "CouchDB file compression tests", @@ -62,13 +63,11 @@ couch_file_compression_test_() -> } }. 
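`create_design_doc/2` above is written against the EJSON convention shared by jiffy and couch_doc:from_json_obj: a JSON object is a one-element tuple wrapping a proplist, which is exactly why erlfmt's reindenting produces the deeply nested `{[ ... ]}` staircases throughout this commit. A minimal sketch of the shape:

```
%% Sketch: EJSON -- {Proplist} is an object, lists are arrays,
%% binaries are strings.
ddoc_ejson() ->
    {[
        {<<"_id">>, <<"_design/example">>},
        {<<"views">>,
            {[
                {<<"by_id">>,
                    {[{<<"map">>, <<"function(doc){emit(doc._id, null);}">>}]}}
            ]}}
    ]}.
```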
- should_use_none({_, DbName}) -> run_test(DbName, "none"). should_use_deflate_1({_, DbName}) -> run_test(DbName, "deflate_1"). should_use_deflate_9({_, DbName}) -> run_test(DbName, "deflate_9"). should_use_snappy({_, DbName}) -> run_test(DbName, "snappy"). - should_compare_compression_methods({_, DbName}) -> TestDb = setup_db(DbName), Name = "none > snappy > deflate_1 > deflate_9", @@ -78,7 +77,6 @@ should_compare_compression_methods({_, DbName}) -> couch_server:delete(TestDb, [?ADMIN_CTX]) end. - run_test(DbName, Comp) -> config:set("couchdb", "file_compression", Comp, false), Timeout = 5 + ?TIMEOUT, @@ -93,7 +91,6 @@ run_test(DbName, Comp) -> ok = couch_server:delete(TestDb, [?ADMIN_CTX]) end. - compare_methods(DbName) -> config:set("couchdb", "file_compression", "none", false), ExternalSizePreCompact = db_external_size(DbName), @@ -140,22 +137,23 @@ compare_methods(DbName) -> ?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy), ?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9). - populate_db(_Db, NumDocs) when NumDocs =< 0 -> ok; populate_db(Db, NumDocs) -> Docs = lists:map( fun(_) -> - couch_doc:from_json_obj({[ - {<<"_id">>, couch_uuids:random()}, - {<<"string">>, ?l2b(lists:duplicate(1000, $X))} - ]}) + couch_doc:from_json_obj( + {[ + {<<"_id">>, couch_uuids:random()}, + {<<"string">>, ?l2b(lists:duplicate(1000, $X))} + ]} + ) end, - lists:seq(1, 500)), + lists:seq(1, 500) + ), {ok, _} = couch_db:update_docs(Db, Docs, []), populate_db(Db, NumDocs - 500). - setup_db(SrcDbName) -> TgtDbName = ?tempdb(), TgtDbFileName = binary_to_list(TgtDbName) ++ ".couch", @@ -167,7 +165,6 @@ setup_db(SrcDbName) -> refresh_index(TgtDbName), TgtDbName. - refresh_index(DbName) -> {ok, Db} = couch_db:open_int(DbName, []), {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]), @@ -224,19 +221,23 @@ external_size(Info) -> wait_compaction(DbName, Kind, Line) -> WaitFun = fun() -> - case is_compaction_running(DbName) of - true -> wait; - false -> ok - end + case is_compaction_running(DbName) of + true -> wait; + false -> ok + end end, case test_util:wait(WaitFun, ?TIMEOUT) of timeout -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, Line}, - {reason, "Timeout waiting for " - ++ Kind - ++ " database compaction"}]}); + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, + {line, Line}, + {reason, + "Timeout waiting for " ++ + Kind ++ + " database compaction"} + ]} + ); _ -> ok end. @@ -246,5 +247,5 @@ is_compaction_running(DbName) -> {ok, DbInfo} = couch_db:get_db_info(Db), {ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID), couch_db:close(Db), - (couch_util:get_value(compact_running, ViewInfo) =:= true) - orelse (couch_util:get_value(compact_running, DbInfo) =:= true). + (couch_util:get_value(compact_running, ViewInfo) =:= true) orelse + (couch_util:get_value(compact_running, DbInfo) =:= true). diff --git a/src/couch/test/eunit/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl index c6c039eb0..08870f8c8 100644 --- a/src/couch/test/eunit/couchdb_location_header_tests.erl +++ b/src/couch/test/eunit/couchdb_location_header_tests.erl @@ -17,7 +17,6 @@ -define(TIMEOUT, 1000). - setup() -> DbName = ?tempdb(), {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), @@ -32,16 +31,17 @@ teardown({_, DbName}) -> ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), ok. 
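The compression suite above compares none, snappy, deflate_1 and deflate_9 on highly repetitive documents. The deflate levels can be reproduced with the stdlib zlib module alone; a sketch:

```
%% Sketch: deflate a payload at an explicit level. On repetitive
%% input like the 1000 x $X docs above, level 9 output should never
%% be larger than level 1.
deflate(Data, Level) ->
    Z = zlib:open(),
    ok = zlib:deflateInit(Z, Level),
    Out = zlib:deflate(Z, Data, finish),
    ok = zlib:deflateEnd(Z),
    ok = zlib:close(Z),
    iolist_to_binary(Out).
```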
- header_test_() -> { "CouchDB Location Header Tests", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_work_with_newlines_in_docs/1, fun should_work_with_newlines_in_attachments/1 @@ -56,10 +56,14 @@ should_work_with_newlines_in_docs({Host, DbName}) -> ?_assertEqual( Url, begin - {ok, _, Headers, _} = test_request:put(Url, - [{"Content-Type", "application/json"}], "{}"), + {ok, _, Headers, _} = test_request:put( + Url, + [{"Content-Type", "application/json"}], + "{}" + ), proplists:get_value("Location", Headers) - end)}. + end + )}. should_work_with_newlines_in_attachments({Host, DbName}) -> Url = Host ++ "/" ++ DbName, @@ -75,4 +79,5 @@ should_work_with_newlines_in_attachments({Host, DbName}) -> ], {ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body), proplists:get_value("Location", Headers) - end)}. + end + )}. diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl index 3a560edce..9822542f3 100644 --- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl +++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl @@ -15,24 +15,24 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). - - --define(DDOC, {[ - {<<"_id">>, <<"_design/foo">>}, - {<<"shows">>, {[ - {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>} - ]}} -]}). +-define(DDOC, + {[ + {<<"_id">>, <<"_design/foo">>}, + {<<"shows">>, + {[ + {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>} + ]}} + ]} +). -define(USER, "mrview_cors_test_admin"). -define(PASS, "pass"). -define(AUTH, {basic_auth, {?USER, ?PASS}}). - start() -> Ctx = test_util:start_couch([chttpd]), Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false), ok = config:set("chttpd", "enable_cors", "true", false), ok = config:set("vhosts", "example.com", "/", false), Ctx. @@ -49,7 +49,7 @@ setup(PortType) -> {Host, ?b2l(DbName)}. teardown(Ctx) -> - ok = config:delete("admins", ?USER, _Persist=false), + ok = config:delete("admins", ?USER, _Persist = false), test_util:stop_couch(Ctx). teardown(PortType, {_Host, DbName}) -> @@ -61,7 +61,8 @@ cors_test_() -> "CORS for mrview", { setup, - fun start/0, fun teardown/1, + fun start/0, + fun teardown/1, [show_tests()] } }. @@ -83,13 +84,16 @@ make_test_case(Mod, Funs) -> should_make_shows_request(_, {Host, DbName}) -> ?_test(begin - ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar", - Headers = [{"Origin", "http://example.com"}, - {"Access-Control-Request-Method", "GET"}, ?AUTH], - {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers), - Origin = proplists:get_value("Access-Control-Allow-Origin", Resp), - ?assertEqual("http://example.com", Origin), - ?assertEqual(<<"<h1>wosh</h1>">>, Body) + ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar", + Headers = [ + {"Origin", "http://example.com"}, + {"Access-Control-Request-Method", "GET"}, + ?AUTH + ], + {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers), + Origin = proplists:get_value("Access-Control-Allow-Origin", Resp), + ?assertEqual("http://example.com", Origin), + ?assertEqual(<<"<h1>wosh</h1>">>, Body) end). 
create_db(backdoor, DbName) -> @@ -111,7 +115,6 @@ assert_success(create_db, Status) -> true = lists:member(Status, [201, 202]); assert_success(delete_db, Status) -> true = lists:member(Status, [200, 202]). - host_url(PortType) -> "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType). @@ -132,7 +135,6 @@ port(clustered) -> port(backdoor) -> integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). - upload_ddoc(Host, DbName) -> Url = Host ++ "/" ++ DbName ++ "/_design/foo", Body = couch_util:json_encode(?DDOC), diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl index ec77b190d..606c9c39a 100644 --- a/src/couch/test/eunit/couchdb_mrview_tests.erl +++ b/src/couch/test/eunit/couchdb_mrview_tests.erl @@ -15,41 +15,46 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). - - --define(DDOC, {[ - {<<"_id">>, <<"_design/foo">>}, - {<<"shows">>, {[ - {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>} - ]}}, - {<<"updates">>, {[ - {<<"report">>, <<"function(doc, req) {" - "var data = JSON.parse(req.body); " - "return ['test', data];" - "}">>} - ]}}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>} - ]}} - ]}} -]}). +-define(DDOC, + {[ + {<<"_id">>, <<"_design/foo">>}, + {<<"shows">>, + {[ + {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>} + ]}}, + {<<"updates">>, + {[ + {<<"report">>, << + "function(doc, req) {" + "var data = JSON.parse(req.body); " + "return ['test', data];" + "}" + >>} + ]}}, + {<<"views">>, + {[ + {<<"view1">>, + {[ + {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>} + ]}} + ]}} + ]} +). -define(USER, "admin"). -define(PASS, "pass"). -define(AUTH, {basic_auth, {?USER, ?PASS}}). - setup_all() -> Ctx = test_util:start_couch([chttpd]), ok = meck:new(mochiweb_socket, [passthrough]), Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false), Ctx. teardown_all(Ctx) -> meck:unload(), - ok = config:delete("admins", ?USER, _Persist=false), + ok = config:delete("admins", ?USER, _Persist = false), test_util:stop_couch(Ctx). setup(PortType) -> @@ -108,7 +113,6 @@ mrview_cleanup_index_files_test_() -> } }. - make_test_case(Mod, Funs) -> { lists:flatten(io_lib:format("~s", [Mod])), @@ -122,33 +126,38 @@ make_test_case(Mod, Funs) -> should_return_invalid_request_body(PortType, {Host, DbName}) -> ?_test(begin - ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}), - ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id", - {ok, Status, _Headers, Body} = - test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>), - {Props} = jiffy:decode(Body), - ?assertEqual( - <<"bad_request">>, couch_util:get_value(<<"error">>, Props)), - ?assertEqual( - <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)), - ?assertEqual(400, Status), - ok + ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}), + ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id", + {ok, Status, _Headers, Body} = + test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>), + {Props} = jiffy:decode(Body), + ?assertEqual( + <<"bad_request">>, couch_util:get_value(<<"error">>, Props) + ), + ?assertEqual( + <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props) + ), + ?assertEqual(400, Status), + ok end). 
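`should_return_invalid_request_body` above decodes CouchDB's standard JSON error envelope. Extracted as a sketch (jiffy assumed):

```
%% Sketch: CouchDB error bodies carry {"error": ..., "reason": ...}.
error_fields(Body) ->
    {Props} = jiffy:decode(Body),
    {proplists:get_value(<<"error">>, Props),
     proplists:get_value(<<"reason">>, Props)}.
```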
should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) -> Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}], ?_test(begin - ReqUrl = Host ++ "/" ++ DbName - ++ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args), - {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]), - {Props} = jiffy:decode(Body), - ?assertEqual( - <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)), - ?assertEqual( + ReqUrl = + Host ++ "/" ++ DbName ++ + "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args), + {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]), + {Props} = jiffy:decode(Body), + ?assertEqual( + <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props) + ), + ?assertEqual( <<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>, - couch_util:get_value(<<"reason">>, Props)), - ?assertEqual(400, Status), - ok + couch_util:get_value(<<"reason">>, Props) + ), + ?assertEqual(400, Status), + ok end). should_cleanup_index_files(_PortType, {Host, DbName}) -> @@ -167,30 +176,34 @@ should_cleanup_index_files(_PortType, {Host, DbName}) -> % It is hard to simulate inactive view. % Since couch_mrview:cleanup is called on view definition change. % That's why we just create extra files in place - ToDelete = lists:map(fun(FilePath) -> - ViewFile = filename:join([ - filename:dirname(FilePath), - "11111111111111111111111111111111.view"]), - file:write_file(ViewFile, <<>>), - ViewFile - end, FileList0), + ToDelete = lists:map( + fun(FilePath) -> + ViewFile = filename:join([ + filename:dirname(FilePath), + "11111111111111111111111111111111.view" + ]), + file:write_file(ViewFile, <<>>), + ViewFile + end, + FileList0 + ), FileList1 = filelib:wildcard(IndexWildCard), ?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))), CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup", {ok, _Status1, _Headers1, _Body1} = test_request:post( - CleanupUrl, [], <<>>, [?AUTH]), + CleanupUrl, [], <<>>, [?AUTH] + ), test_util:wait(fun() -> - IndexFiles = filelib:wildcard(IndexWildCard), - case lists:usort(FileList0) == lists:usort(IndexFiles) of - false -> wait; - true -> ok - end + IndexFiles = filelib:wildcard(IndexWildCard), + case lists:usort(FileList0) == lists:usort(IndexFiles) of + false -> wait; + true -> ok + end end), ok end). - create_doc(backdoor, DbName, Id, Body) -> JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body), Doc = couch_doc:from_json_obj(JsonDoc), @@ -223,7 +236,6 @@ assert_success(create_db, Status) -> assert_success(delete_db, Status) -> ?assert(lists:member(Status, [200, 202])). - host_url(PortType) -> "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType). @@ -243,7 +255,6 @@ port(clustered) -> port(backdoor) -> integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). - upload_ddoc(Host, DbName) -> Url = Host ++ "/" ++ DbName ++ "/_design/foo", Body = couch_util:json_encode(?DDOC), diff --git a/src/couch/test/eunit/couchdb_os_proc_pool.erl b/src/couch/test/eunit/couchdb_os_proc_pool.erl index b552e114a..620265b32 100644 --- a/src/couch/test/eunit/couchdb_os_proc_pool.erl +++ b/src/couch/test/eunit/couchdb_os_proc_pool.erl @@ -17,7 +17,6 @@ -define(TIMEOUT, 1000). 
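`should_cleanup_index_files` above polls with test_util:wait/1 until the stray .view files disappear. The poll-until idiom behind that helper, as a standalone sketch:

```
%% Sketch: re-run a probe that returns wait until it returns
%% anything else, or give up at the deadline.
wait_until(_Fun, TimeoutMs) when TimeoutMs =< 0 ->
    timeout;
wait_until(Fun, TimeoutMs) ->
    case Fun() of
        wait ->
            timer:sleep(50),
            wait_until(Fun, TimeoutMs - 50);
        Other ->
            Other
    end.
```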
- setup() -> ok = couch_proc_manager:reload(), meck:new(couch_os_process, [passthrough]), @@ -32,15 +31,17 @@ os_proc_pool_test_() -> "OS processes pool tests", { setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ should_block_new_proc_on_full_pool(), should_free_slot_on_proc_unexpected_exit(), should_reuse_known_proc(), -% should_process_waiting_queue_as_fifo(), + % should_process_waiting_queue_as_fifo(), should_reduce_pool_on_idle_os_procs(), should_not_return_broken_process_to_the_pool() ] @@ -48,7 +49,6 @@ os_proc_pool_test_() -> } }. - should_block_new_proc_on_full_pool() -> ?_test(begin Client1 = spawn_client(), @@ -78,12 +78,14 @@ should_block_new_proc_on_full_pool() -> ?assertEqual(Proc1#proc.pid, Proc4#proc.pid), ?assertNotEqual(Proc1#proc.client, Proc4#proc.client), - lists:map(fun(C) -> - ?assertEqual(ok, stop_client(C)) - end, [Client2, Client3, Client4]) + lists:map( + fun(C) -> + ?assertEqual(ok, stop_client(C)) + end, + [Client2, Client3, Client4] + ) end). - should_free_slot_on_proc_unexpected_exit() -> ?_test(begin Client1 = spawn_client(), @@ -119,12 +121,14 @@ should_free_slot_on_proc_unexpected_exit() -> ?assertNotEqual(Proc3#proc.pid, Proc4#proc.pid), ?assertNotEqual(Proc3#proc.client, Proc4#proc.client), - lists:map(fun(C) -> - ?assertEqual(ok, stop_client(C)) - end, [Client2, Client3, Client4]) + lists:map( + fun(C) -> + ?assertEqual(ok, stop_client(C)) + end, + [Client2, Client3, Client4] + ) end). - should_reuse_known_proc() -> ?_test(begin Client1 = spawn_client(<<"ddoc1">>), @@ -150,7 +154,6 @@ should_reuse_known_proc() -> ?assertEqual(ok, stop_client(Client1Again)) end). - %should_process_waiting_queue_as_fifo() -> % ?_test(begin % Client1 = spawn_client(<<"ddoc1">>), @@ -181,12 +184,15 @@ should_reuse_known_proc() -> % ?assertEqual(ok, stop_client(Client5)) % end). - should_reduce_pool_on_idle_os_procs() -> ?_test(begin %% os_process_idle_limit is in sec - config:set("query_server_config", - "os_process_idle_limit", "1", false), + config:set( + "query_server_config", + "os_process_idle_limit", + "1", + false + ), ok = confirm_config("os_process_idle_limit", "1"), Client1 = spawn_client(<<"ddoc1">>), @@ -207,15 +213,22 @@ should_reduce_pool_on_idle_os_procs() -> ?assertEqual(1, couch_proc_manager:get_proc_count()) end). - should_not_return_broken_process_to_the_pool() -> ?_test(begin - config:set("query_server_config", - "os_process_soft_limit", "1", false), + config:set( + "query_server_config", + "os_process_soft_limit", + "1", + false + ), ok = confirm_config("os_process_soft_limit", "1"), - config:set("query_server_config", - "os_process_limit", "1", false), + config:set( + "query_server_config", + "os_process_limit", + "1", + false + ), ok = confirm_config("os_process_limit", "1"), DDoc1 = ddoc(<<"_design/ddoc1">>), @@ -227,9 +240,12 @@ should_not_return_broken_process_to_the_pool() -> ?assertEqual(0, meck:num_calls(couch_os_process, stop, 1)), ?assertEqual(1, couch_proc_manager:get_proc_count()), - ?assertError(bad, couch_query_servers:with_ddoc_proc(DDoc1, fun(_) -> - error(bad) - end)), + ?assertError( + bad, + couch_query_servers:with_ddoc_proc(DDoc1, fun(_) -> + error(bad) + end) + ), ?assertEqual(1, meck:num_calls(couch_os_process, stop, 1)), WaitFun = fun() -> @@ -250,19 +266,21 @@ should_not_return_broken_process_to_the_pool() -> ?assertEqual(1, couch_proc_manager:get_proc_count()) end). 
- ddoc(DDocId) -> #doc{ id = DDocId, revs = {1, [<<"abc">>]}, - body = {[ - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"v1">>, {[ - {<<"map">>, <<"function(doc) {emit(doc.value,1);}">>} - ]}} - ]}} - ]} + body = + {[ + {<<"language">>, <<"javascript">>}, + {<<"views">>, + {[ + {<<"v1">>, + {[ + {<<"map">>, <<"function(doc) {emit(doc.value,1);}">>} + ]}} + ]}} + ]} }. setup_config() -> @@ -279,11 +297,13 @@ confirm_config(Key, Value, Count) -> Value -> ok; _ when Count > 10 -> - erlang:error({config_setup, [ - {module, ?MODULE}, - {line, ?LINE}, - {value, timeout} - ]}); + erlang:error( + {config_setup, [ + {module, ?MODULE}, + {line, ?LINE}, + {value, timeout} + ]} + ); _ -> %% we need to wait to let gen_server:cast finish timer:sleep(10), @@ -304,7 +324,7 @@ spawn_client(DDocId) -> Ref = make_ref(), Pid = spawn(fun() -> DDocKey = {DDocId, <<"1-abcdefgh">>}, - DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}}, + DDoc = #doc{body = {[{<<"language">>, <<"erlang">>}]}}, Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey), loop(Parent, Ref, Proc) end), @@ -324,11 +344,15 @@ get_client_proc({Pid, Ref}, ClientName) -> receive {proc, Ref, Proc} -> Proc after ?TIMEOUT -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout getting client " - ++ ClientName ++ " proc"}]}) + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, + {line, ?LINE}, + {reason, + "Timeout getting client " ++ + ClientName ++ " proc"} + ]} + ) end. stop_client({Pid, Ref}) -> @@ -354,7 +378,7 @@ loop(Parent, Ref, Proc) -> ping -> Parent ! {pong, Ref}, loop(Parent, Ref, Proc); - get_proc -> + get_proc -> Parent ! {proc, Ref, Proc}, loop(Parent, Ref, Proc); stop -> diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl index 1329aba27..a7d449a2d 100644 --- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl +++ b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl @@ -24,12 +24,15 @@ start() -> test_util:start_couch(). - setup() -> DbName = ?tempdb(), {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]), - Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID}, - {<<"value">>, 0}]}), + Doc = couch_doc:from_json_obj( + {[ + {<<"_id">>, ?DOC_ID}, + {<<"value">>, 0} + ]} + ), {ok, Rev} = couch_db:update_doc(Db, Doc, []), ok = couch_db:close(Db), RevStr = couch_doc:rev_to_str(Rev), @@ -43,13 +46,13 @@ teardown({DbName, _}) -> teardown(_, {DbName, _RevStr}) -> teardown({DbName, _RevStr}). - view_indexes_cleanup_test_() -> { "Update conflicts", { setup, - fun start/0, fun test_util:stop_couch/1, + fun start/0, + fun test_util:stop_couch/1, [ concurrent_updates(), bulk_docs_updates() @@ -57,23 +60,27 @@ view_indexes_cleanup_test_() -> } }. -concurrent_updates()-> +concurrent_updates() -> { "Concurrent updates", { foreachx, - fun setup/1, fun teardown/2, - [{NumClients, fun should_concurrently_update_doc/2} - || NumClients <- ?NUM_CLIENTS] + fun setup/1, + fun teardown/2, + [ + {NumClients, fun should_concurrently_update_doc/2} + || NumClients <- ?NUM_CLIENTS + ] } }. -bulk_docs_updates()-> +bulk_docs_updates() -> { "Bulk docs updates", { foreach, - fun setup/0, fun teardown/1, + fun setup/0, + fun teardown/1, [ fun should_bulk_create_delete_doc/1, fun should_bulk_create_local_doc/1, @@ -82,38 +89,41 @@ bulk_docs_updates()-> } }. 
+should_concurrently_update_doc(NumClients, {DbName, InitRev}) ->
+    {
+        ?i2l(NumClients) ++ " clients",
+        {inorder, [
+            {"update doc",
+                {timeout, ?TIMEOUT div 1000,
+                    ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
+            {"ensure in single leaf", ?_test(ensure_in_single_revision_leaf(DbName))}
+        ]}
+    }.

-should_concurrently_update_doc(NumClients, {DbName, InitRev})->
-    {?i2l(NumClients) ++ " clients",
-        {inorder,
-            [{"update doc",
-                {timeout, ?TIMEOUT div 1000,
-                    ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
-            {"ensure in single leaf",
-                ?_test(ensure_in_single_revision_leaf(DbName))}]}}.

-should_bulk_create_delete_doc({DbName, InitRev})->
+should_bulk_create_delete_doc({DbName, InitRev}) ->
     ?_test(bulk_delete_create(DbName, InitRev)).

-should_bulk_create_local_doc({DbName, _})->
+should_bulk_create_local_doc({DbName, _}) ->
     ?_test(bulk_create_local_doc(DbName)).

-should_ignore_invalid_local_doc({DbName, _})->
+should_ignore_invalid_local_doc({DbName, _}) ->
     ?_test(ignore_invalid_local_doc(DbName)).

-
 concurrent_doc_update(NumClients, DbName, InitRev) ->
     Clients = lists:map(
         fun(Value) ->
-            ClientDoc = couch_doc:from_json_obj({[
-                {<<"_id">>, ?DOC_ID},
-                {<<"_rev">>, InitRev},
-                {<<"value">>, Value}
-            ]}),
+            ClientDoc = couch_doc:from_json_obj(
+                {[
+                    {<<"_id">>, ?DOC_ID},
+                    {<<"_rev">>, InitRev},
+                    {<<"value">>, Value}
+                ]}
+            ),
             Pid = spawn_client(DbName, ClientDoc),
             {Value, Pid, erlang:monitor(process, Pid)}
         end,
-        lists:seq(1, NumClients)),
+        lists:seq(1, NumClients)
+    ),

     lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),

@@ -125,20 +135,31 @@ concurrent_doc_update(NumClients, DbName, InitRev) ->
                 {'DOWN', MonRef, process, Pid, conflict} ->
                     {AccConflicts + 1, AccValue};
                 {'DOWN', MonRef, process, Pid, Error} ->
-                    erlang:error({assertion_failed,
-                        [{module, ?MODULE},
-                        {line, ?LINE},
-                        {reason, "Client " ++ ?i2l(Value)
-                            ++ " got update error: "
-                            ++ couch_util:to_list(Error)}]})
+                    erlang:error(
+                        {assertion_failed, [
+                            {module, ?MODULE},
+                            {line, ?LINE},
+                            {reason,
+                                "Client " ++ ?i2l(Value) ++
+                                    " got update error: " ++
+                                    couch_util:to_list(Error)}
+                        ]}
+                    )
             after ?TIMEOUT div 2 ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, "Timeout waiting for client "
-                        ++ ?i2l(Value) ++ " to die"}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason,
+                            "Timeout waiting for client " ++
+                                ?i2l(Value) ++ " to die"}
+                    ]}
+                )
             end
-        end, {0, nil}, Clients),
+        end,
+        {0, nil},
+        Clients
+    ),
     ?assertEqual(NumClients - 1, NumConflicts),

     {ok, Db} = couch_db:open_int(DbName, []),
@@ -171,15 +192,19 @@ ensure_in_single_revision_leaf(DbName) ->

 bulk_delete_create(DbName, InitRev) ->
     {ok, Db} = couch_db:open_int(DbName, []),
-    DeletedDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, ?DOC_ID},
-        {<<"_rev">>, InitRev},
-        {<<"_deleted">>, true}
-    ]}),
-    NewDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, ?DOC_ID},
-        {<<"value">>, 666}
-    ]}),
+    DeletedDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, ?DOC_ID},
+            {<<"_rev">>, InitRev},
+            {<<"_deleted">>, true}
+        ]}
+    ),
+    NewDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, ?DOC_ID},
+            {<<"value">>, 666}
+        ]}
+    ),

     {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
     ok = couch_db:close(Db),
@@ -189,9 +214,11 @@ bulk_delete_create(DbName, InitRev) ->

     {ok, Db2} = couch_db:open_int(DbName, []),
     {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
-        Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
+        Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]
+    ),
     {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
-        Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
+        Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]
+    ),
     ok = couch_db:close(Db2),

     {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
@@ -200,40 +227,75 @@ bulk_delete_create(DbName, InitRev) ->
     %% Document was deleted
     ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
     %% New document not flagged as deleted
-    ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
-        Doc2Props)),
+    ?assertEqual(
+        undefined,
+        couch_util:get_value(
+            <<"_deleted">>,
+            Doc2Props
+        )
+    ),
     %% New leaf revision has the right value
-    ?assertEqual(666, couch_util:get_value(<<"value">>,
-        Doc2Props)),
+    ?assertEqual(
+        666,
+        couch_util:get_value(
+            <<"value">>,
+            Doc2Props
+        )
+    ),
     %% Deleted document has no conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
-        Doc1Props)),
+    ?assertEqual(
+        undefined,
+        couch_util:get_value(
+            <<"_conflicts">>,
+            Doc1Props
+        )
+    ),
     %% Deleted document has no deleted conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
-        Doc1Props)),
+    ?assertEqual(
+        undefined,
+        couch_util:get_value(
+            <<"_deleted_conflicts">>,
+            Doc1Props
+        )
+    ),
     %% New leaf revision doesn't have conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
-        Doc1Props)),
+    ?assertEqual(
+        undefined,
+        couch_util:get_value(
+            <<"_conflicts">>,
+            Doc1Props
+        )
+    ),
     %% New leaf revision doesn't have deleted conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
-        Doc1Props)),
+    ?assertEqual(
+        undefined,
+        couch_util:get_value(
+            <<"_deleted_conflicts">>,
+            Doc1Props
+        )
+    ),

     %% Deleted revision has position 2
     ?assertEqual(2, element(1, Rev1)),
     %% New leaf revision has position 3
     ?assertEqual(3, element(1, Rev2)).

-
 bulk_create_local_doc(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
-    LocalDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, ?LOCAL_DOC_ID},
-        {<<"_rev">>, <<"0-1">>}
-    ]}),
-
-    {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
-        [], replicated_changes),
+    LocalDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, ?LOCAL_DOC_ID},
+            {<<"_rev">>, <<"0-1">>}
+        ]}
+    ),
+
+    {ok, Results} = couch_db:update_docs(
+        Db,
+        [LocalDoc],
+        [],
+        replicated_changes
+    ),
     ok = couch_db:close(Db),

     ?assertEqual([], Results),
@@ -243,17 +305,22 @@ bulk_create_local_doc(DbName) ->
     ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id),
     ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs).

-
 ignore_invalid_local_doc(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
-    LocalDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, ?LOCAL_DOC_ID},
-        {<<"_rev">>, <<"0-abcdef">>}
-    ]}),
-
-    {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
-        [], replicated_changes),
+    LocalDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, ?LOCAL_DOC_ID},
+            {<<"_rev">>, <<"0-abcdef">>}
+        ]}
+    ),
+
+    {ok, Results} = couch_db:update_docs(
+        Db,
+        [LocalDoc],
+        [],
+        replicated_changes
+    ),
     ok = couch_db:close(Db),

     ?assertEqual([], Results),
@@ -262,7 +329,6 @@ ignore_invalid_local_doc(DbName) ->
     ok = couch_db:close(Db2),
     ?assertEqual({not_found, missing}, Result2).

-
 spawn_client(DbName, Doc) ->
     spawn(fun() ->
         {ok, Db} = couch_db:open_int(DbName, []),
@@ -270,11 +336,13 @@ spawn_client(DbName, Doc) ->
             go -> ok
         end,
         erlang:yield(),
-        Result = try
-            couch_db:update_doc(Db, Doc, [])
-        catch _:Error ->
-            Error
-        end,
+        Result =
+            try
+                couch_db:update_doc(Db, Doc, [])
+            catch
+                _:Error ->
+                    Error
+            end,
         ok = couch_db:close(Db),
         exit(Result)
     end).
diff --git a/src/couch/test/eunit/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl
index fbe5579cd..635b8f9a6 100644
--- a/src/couch/test/eunit/couchdb_vhosts_tests.erl
+++ b/src/couch/test/eunit/couchdb_vhosts_tests.erl
@@ -18,30 +18,35 @@
 -define(TIMEOUT, 1000).
 -define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).

-
 setup() ->
     DbName = ?tempdb(),
     {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
-    Doc = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc1">>},
-        {<<"value">>, 666}
-    ]}),
+    Doc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc1">>},
+            {<<"value">>, 666}
+        ]}
+    ),

-    Doc1 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"_design/doc1">>},
-        {<<"shows">>, {[
-            {<<"test">>, <<"function(doc, req) {
-            return { json: {
-                requested_path: '/' + req.requested_path.join('/'),
-                path: '/' + req.path.join('/')}};}">>}
-        ]}},
-        {<<"rewrites">>, [
-            {[
-                {<<"from">>, <<"/">>},
-                {<<"to">>, <<"_show/test">>}
+    Doc1 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"_design/doc1">>},
+            {<<"shows">>,
+                {[
+                    {<<"test">>,
+                        <<"function(doc, req) {\n"
+                            "            return { json: {\n"
+                            "                requested_path: '/' + req.requested_path.join('/'),\n"
+                            "                path: '/' + req.path.join('/')}};}">>}
+                ]}},
+            {<<"rewrites">>, [
+                {[
+                    {<<"from">>, <<"/">>},
+                    {<<"to">>, <<"_show/test">>}
+                ]}
             ]}
         ]}
-    ]}),
+    ),

     {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
     couch_db:close(Db),
@@ -54,16 +59,17 @@ teardown({_, DbName}) ->
     ok = couch_server:delete(?l2b(DbName), []),
     ok.

-
 vhosts_test_() ->
     {
         "Virtual Hosts rewrite tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup/0, fun teardown/1,
+                fun setup/0,
+                fun teardown/1,
                 [
                     fun should_return_database_info/1,
                     fun should_return_revs_info/1,
@@ -89,103 +95,146 @@ should_return_database_info({Url, DbName}) ->
                 {JsonBody} = jiffy:decode(Body),
                 ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_return_revs_info({Url, DbName}) ->
     ?_test(begin
         ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
-        case test_request:get(Url ++ "/doc1?revs_info=true", [],
-            [{host_header, "example.com"}]) of
+        case
+            test_request:get(
+                Url ++ "/doc1?revs_info=true",
+                [],
+                [{host_header, "example.com"}]
+            )
+        of
             {ok, _, _, Body} ->
                 {JsonBody} = jiffy:decode(Body),
                 ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).
 should_return_virtual_request_path_field_in_request({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts", "example1.com",
-            "/" ++ DbName ++ "/_design/doc1/_rewrite/",
-            false),
+        ok = config:set(
+            "vhosts",
+            "example1.com",
+            "/" ++ DbName ++ "/_design/doc1/_rewrite/",
+            false
+        ),
         case test_request:get(Url, [], [{host_header, "example1.com"}]) of
             {ok, _, _, Body} ->
                 {Json} = jiffy:decode(Body),
-                ?assertEqual(<<"/">>,
-                    proplists:get_value(<<"requested_path">>, Json));
+                ?assertEqual(
+                    <<"/">>,
+                    proplists:get_value(<<"requested_path">>, Json)
+                );
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_return_real_request_path_field_in_request({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts", "example1.com",
-            "/" ++ DbName ++ "/_design/doc1/_rewrite/",
-            false),
+        ok = config:set(
+            "vhosts",
+            "example1.com",
+            "/" ++ DbName ++ "/_design/doc1/_rewrite/",
+            false
+        ),
         case test_request:get(Url, [], [{host_header, "example1.com"}]) of
             {ok, _, _, Body} ->
                 {Json} = jiffy:decode(Body),
                 Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
                 ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_match_wildcard_vhost({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts", "*.example.com",
-            "/" ++ DbName ++ "/_design/doc1/_rewrite", false),
+        ok = config:set(
+            "vhosts",
+            "*.example.com",
+            "/" ++ DbName ++ "/_design/doc1/_rewrite",
+            false
+        ),
         case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
             {ok, _, _, Body} ->
                 {Json} = jiffy:decode(Body),
                 Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
                 ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts", ":dbname.example1.com",
-            "/:dbname", false),
+        ok = config:set(
+            "vhosts",
+            ":dbname.example1.com",
+            "/:dbname",
+            false
+        ),
         Host = DbName ++ ".example1.com",
         case test_request:get(Url, [], [{host_header, Host}]) of
             {ok, _, _, Body} ->
                 {JsonBody} = jiffy:decode(Body),
                 ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts",":appname.:dbname.example1.com",
-            "/:dbname/_design/:appname/_rewrite/", false),
+        ok = config:set(
+            "vhosts",
+            ":appname.:dbname.example1.com",
+            "/:dbname/_design/:appname/_rewrite/",
+            false
+        ),
         Host = "doc1." ++ DbName ++ ".example1.com",
         case test_request:get(Url, [], [{host_header, Host}]) of
             {ok, _, _, Body} ->
@@ -193,45 +242,61 @@ should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
                 Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
                 ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts",
-            "example.com/test", "/" ++ DbName, false),
+        ok = config:set(
+            "vhosts",
+            "example.com/test",
+            "/" ++ DbName,
+            false
+        ),
         ReqUrl = Url ++ "/test",
         case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
             {ok, _, _, Body} ->
                 {JsonBody} = jiffy:decode(Body),
                 ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

-
 should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts",
-            "example.com/test", "/" ++ DbName, false),
+        ok = config:set(
+            "vhosts",
+            "example.com/test",
+            "/" ++ DbName,
+            false
+        ),
         ReqUrl = Url ++ "/test/doc1?revs_info=true",
         case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
             {ok, _, _, Body} ->
                 {JsonBody} = jiffy:decode(Body),
                 ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

@@ -245,27 +310,36 @@ should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
                 {JsonBody} = jiffy:decode(Body),
                 ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).

 should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
     ?_test(begin
-        ok = config:set("vhosts", "*/test1",
-            "/" ++ DbName ++ "/_design/doc1/_show/test",
-            false),
+        ok = config:set(
+            "vhosts",
+            "*/test1",
+            "/" ++ DbName ++ "/_design/doc1/_show/test",
+            false
+        ),
         case test_request:get(Url ++ "/test1") of
             {ok, _, _, Body} ->
                 {Json} = jiffy:decode(Body),
                 Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
                 ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
             Else ->
-                erlang:error({assertion_failed,
-                    [{module, ?MODULE},
-                    {line, ?LINE},
-                    {reason, ?iofmt("Request failed: ~p", [Else])}]})
+                erlang:error(
+                    {assertion_failed, [
+                        {module, ?MODULE},
+                        {line, ?LINE},
+                        {reason, ?iofmt("Request failed: ~p", [Else])}
+                    ]}
+                )
         end
     end).
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
index 06e2f03eb..f4d51bdd0 100644
--- a/src/couch/test/eunit/couchdb_views_tests.erl
+++ b/src/couch/test/eunit/couchdb_views_tests.erl
@@ -48,10 +48,18 @@ setup_legacy() ->
     DbDir = config:get("couchdb", "database_dir"),
     ViewDir = config:get("couchdb", "view_index_dir"),
-    OldViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
-        OldViewName]),
-    NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
-        NewViewName]),
+    OldViewFilePath = filename:join([
+        ViewDir,
+        ".test_design",
+        "mrview",
+        OldViewName
+    ]),
+    NewViewFilePath = filename:join([
+        ViewDir,
+        ".test_design",
+        "mrview",
+        NewViewName
+    ]),

     NewDbFilePath = filename:join([DbDir, DbFileName]),

@@ -84,10 +92,12 @@ view_indexes_cleanup_test_() ->
         "View indexes cleanup",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup/0, fun teardown/1,
+                fun setup/0,
+                fun teardown/1,
                 [
                     fun should_have_two_indexes_alive_before_deletion/1,
                     fun should_cleanup_index_file_after_ddoc_deletion/1,
@@ -102,10 +112,12 @@ view_group_db_leaks_test_() ->
         "View group db leaks",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup_with_docs/0, fun teardown/1,
+                fun setup_with_docs/0,
+                fun teardown/1,
                 [
                     fun couchdb_1138/1,
                     fun couchdb_1309/1
@@ -136,10 +148,12 @@ backup_restore_test_() ->
         "Upgrade and bugs related tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup_with_docs/0, fun teardown/1,
+                fun setup_with_docs/0,
+                fun teardown/1,
                 [
                     fun should_not_remember_docs_in_index_after_backup_restore/1
                 ]
@@ -147,16 +161,17 @@ backup_restore_test_() ->
         }
     }.

-
 upgrade_test_() ->
     {
         "Upgrade tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup_legacy/0, fun teardown_legacy/1,
+                fun setup_legacy/0,
+                fun teardown_legacy/1,
                 [
                     fun should_upgrade_legacy_view_files/1
                 ]
@@ -184,7 +199,7 @@ should_not_remember_docs_in_index_after_backup_restore(DbName) ->
         ?assert(has_doc("doc2", Rows1)),
         ?assert(has_doc("doc3", Rows1)),
         ?assertNot(has_doc("doc666", Rows1))
-   end).
+    end).

 should_upgrade_legacy_view_files({DbName, Files}) ->
     ?_test(begin
@@ -206,21 +221,23 @@ should_upgrade_legacy_view_files({DbName, Files}) ->
         % add doc to trigger update
         DocUrl = db_url(DbName) ++ "/bar",
         {ok, _, _, _} = test_request:put(
-            DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>),
+            DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>
+        ),

         % query view for expected results
         Rows1 = query_view(DbName, "test", "test"),
         ?assertEqual(4, length(Rows1)),

         % ensure new header
-        timer:sleep(2000),  % have to wait for awhile to upgrade the index
+
+        % have to wait for awhile to upgrade the index
+        timer:sleep(2000),
         NewHeader = read_header(NewViewFilePath),
         ?assertMatch(#mrheader{}, NewHeader),
         NewViewStatus = hd(NewHeader#mrheader.view_states),
         ?assertEqual(3, tuple_size(NewViewStatus))
     end).

-
 should_have_two_indexes_alive_before_deletion({DbName, _}) ->
     view_cleanup(DbName),
     ?_assertEqual(2, count_index_files(DbName)).
@@ -230,7 +247,7 @@ should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
     view_cleanup(DbName),
     ?_assertEqual(1, count_index_files(DbName)).

-should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
+should_cleanup_all_index_files({DbName, {FooRev, BooRev}}) ->
     delete_design_doc(DbName, <<"_design/foo">>, FooRev),
     delete_design_doc(DbName, <<"_design/boo">>, BooRev),
     view_cleanup(DbName),
@@ -239,7 +256,8 @@ should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
 couchdb_1138(DbName) ->
     ?_test(begin
         {ok, IndexerPid} = couch_index_server:get_index(
-            couch_mrview_index, DbName, <<"_design/foo">>),
+            couch_mrview_index, DbName, <<"_design/foo">>
+        ),
         ?assert(is_pid(IndexerPid)),
         ?assert(is_process_alive(IndexerPid)),
         ?assertEqual(2, count_users(DbName)),
@@ -277,7 +295,8 @@ couchdb_1138(DbName) ->
 couchdb_1309(DbName) ->
     ?_test(begin
         {ok, IndexerPid} = couch_index_server:get_index(
-            couch_mrview_index, DbName, <<"_design/foo">>),
+            couch_mrview_index, DbName, <<"_design/foo">>
+        ),
         ?assert(is_pid(IndexerPid)),
         ?assert(is_process_alive(IndexerPid)),
         ?assertEqual(2, count_users(DbName)),
@@ -292,18 +311,21 @@ couchdb_1309(DbName) ->

         ?assert(is_process_alive(IndexerPid)),

-        update_design_doc(DbName,  <<"_design/foo">>, <<"bar">>),
+        update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
         {ok, NewIndexerPid} = couch_index_server:get_index(
-            couch_mrview_index, DbName, <<"_design/foo">>),
+            couch_mrview_index, DbName, <<"_design/foo">>
+        ),
         ?assert(is_pid(NewIndexerPid)),
         ?assert(is_process_alive(NewIndexerPid)),
         ?assertNotEqual(IndexerPid, NewIndexerPid),
-        UserCnt = case count_users(DbName) of
-            N when N > 2 ->
-                timer:sleep(1000),
-                count_users(DbName);
-            N -> N
-        end,
+        UserCnt =
+            case count_users(DbName) of
+                N when N > 2 ->
+                    timer:sleep(1000),
+                    count_users(DbName);
+                N ->
+                    N
+            end,
         ?assertEqual(2, UserCnt),

         Rows1 = query_view(DbName, "foo", "bar", ok),
@@ -312,15 +334,20 @@ couchdb_1309(DbName) ->
         check_rows_value(Rows2, 1),
         ?assertEqual(4, length(Rows2)),

-        ok = stop_indexer(  %% FIXME we need to grab monitor earlier
-            fun() -> ok end,
-            IndexerPid, ?LINE,
-            "old view group is not dead after ddoc update"),
+        %% FIXME we need to grab monitor earlier
+        ok = stop_indexer(
+            fun() -> ok end,
+            IndexerPid,
+            ?LINE,
+            "old view group is not dead after ddoc update"
+        ),

         ok = stop_indexer(
-            fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
-            NewIndexerPid, ?LINE,
-            "new view group did not die after DB deletion")
+            fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
+            NewIndexerPid,
+            ?LINE,
+            "new view group did not die after DB deletion"
+        )
     end).

 couchdb_1283() ->
@@ -328,41 +355,54 @@
         ok = config:set("couchdb", "max_dbs_open", "3", false),

         {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
-        DDoc = couch_doc:from_json_obj({[
-            {<<"_id">>, <<"_design/foo">>},
-            {<<"language">>, <<"javascript">>},
-            {<<"views">>, {[
-                {<<"foo">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo2">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo3">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo4">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo5">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}}
-            ]}}
-        ]}),
+        DDoc = couch_doc:from_json_obj(
+            {[
+                {<<"_id">>, <<"_design/foo">>},
+                {<<"language">>, <<"javascript">>},
+                {<<"views">>,
+                    {[
+                        {<<"foo">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo2">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo3">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo4">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo5">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}}
+                    ]}}
+            ]}
+        ),
         {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
         ok = populate_db(MDb1, 100, 100),
         query_view(couch_db:name(MDb1), "foo", "foo"),
         ok = couch_db:close(MDb1),

         {ok, Pid} = couch_index_server:get_index(
-            couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
+            couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>
+        ),

         % Start and pause compacton
         WaitRef = erlang:make_ref(),
         meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
-            receive {WaitRef, From, init} -> ok end,
+            receive
+                {WaitRef, From, init} -> ok
+            end,
             From ! {WaitRef, inited},
-            receive {WaitRef, go} -> ok end,
+            receive
+                {WaitRef, go} -> ok
+            end,
             meck:passthrough([Db, State, Opts])
         end),

@@ -373,7 +413,9 @@ couchdb_1283() ->
         % Make sure that our compactor is waiting for us
         % before we continue our assertions
         CPid ! {WaitRef, self(), init},
-        receive {WaitRef, inited} -> ok end,
+        receive
+            {WaitRef, inited} -> ok
+        end,

         % Make sure that a compaction process takes a monitor
         % on the database's main_pid
@@ -382,64 +424,74 @@ couchdb_1283() ->
         % Finish compaction to and make sure the monitor
         % disappears
         CPid ! {WaitRef, go},
-        wait_for_process_shutdown(CRef, normal,
-            {reason, "Failure compacting view group"}),
+        wait_for_process_shutdown(
+            CRef,
+            normal,
+            {reason, "Failure compacting view group"}
+        ),

         % Make sure that the monitor was removed
         ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
     end).

 wait_for_process_shutdown(Pid, ExpectedReason, Error) ->
-  receive
-      {'DOWN', Pid, process, _, Reason} ->
-          ?assertEqual(ExpectedReason, Reason)
-  after ?TIMEOUT ->
-      erlang:error(
-          {assertion_failed,
-              [{module, ?MODULE}, {line, ?LINE}, Error]})
-  end.
-
+    receive
+        {'DOWN', Pid, process, _, Reason} ->
+            ?assertEqual(ExpectedReason, Reason)
+    after ?TIMEOUT ->
+        erlang:error(
+            {assertion_failed, [{module, ?MODULE}, {line, ?LINE}, Error]}
+        )
+    end.

 create_doc(DbName, DocId) when is_list(DocId) ->
     create_doc(DbName, ?l2b(DocId));
 create_doc(DbName, DocId) when is_binary(DocId) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    Doc666 = couch_doc:from_json_obj({[
-        {<<"_id">>, DocId},
-        {<<"value">>, 999}
-    ]}),
+    Doc666 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DocId},
+            {<<"value">>, 999}
+        ]}
+    ),
     {ok, _} = couch_db:update_docs(Db, [Doc666]),
     couch_db:close(Db).

 create_docs(DbName) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    Doc1 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc1">>},
-        {<<"value">>, 1}
-
-    ]}),
-    Doc2 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc2">>},
-        {<<"value">>, 2}
-
-    ]}),
-    Doc3 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc3">>},
-        {<<"value">>, 3}
-
-    ]}),
+    Doc1 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc1">>},
+            {<<"value">>, 1}
+        ]}
+    ),
+    Doc2 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc2">>},
+            {<<"value">>, 2}
+        ]}
+    ),
+    Doc3 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc3">>},
+            {<<"value">>, 3}
+        ]}
+    ),
     {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
     couch_db:close(Db).

 populate_db(Db, BatchSize, N) when N > 0 ->
     Docs = lists:map(
         fun(_) ->
-            couch_doc:from_json_obj({[
-                {<<"_id">>, couch_uuids:new()},
-                {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
-            ]})
+            couch_doc:from_json_obj(
+                {[
+                    {<<"_id">>, couch_uuids:new()},
+                    {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
+                ]}
+            )
         end,
-        lists:seq(1, BatchSize)),
+        lists:seq(1, BatchSize)
+    ),
     {ok, _} = couch_db:update_docs(Db, Docs, []),
     populate_db(Db, BatchSize, N - length(Docs));
 populate_db(_Db, _, _) ->
@@ -447,15 +499,19 @@
 create_design_doc(DbName, DDName, ViewName) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    DDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, DDName},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>, {[
-            {ViewName, {[
-                {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
-            ]}}
-        ]}}
-    ]}),
+    DDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DDName},
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>,
+                {[
+                    {ViewName,
+                        {[
+                            {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
+                        ]}}
+                ]}}
+        ]}
+    ),
     {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
     couch_db:close(Db),
     Rev.
@@ -465,27 +521,33 @@ update_design_doc(DbName, DDName, ViewName) ->
     {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]),
     {Props} = couch_doc:to_json_obj(Doc, []),
     Rev = couch_util:get_value(<<"_rev">>, Props),
-    DDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, DDName},
-        {<<"_rev">>, Rev},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>, {[
-            {ViewName, {[
-                {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
-            ]}}
-        ]}}
-    ]}),
+    DDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DDName},
+            {<<"_rev">>, Rev},
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>,
+                {[
+                    {ViewName,
+                        {[
+                            {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
+                        ]}}
+                ]}}
+        ]}
+    ),
     {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]),
     couch_db:close(Db),
     NewRev.

 delete_design_doc(DbName, DDName, Rev) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    DDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, DDName},
-        {<<"_rev">>, couch_doc:rev_to_str(Rev)},
-        {<<"_deleted">>, true}
-    ]}),
+    DDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DDName},
+            {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+            {<<"_deleted">>, true}
+        ]}
+    ),
     {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
     couch_db:close(Db).
@@ -499,11 +561,12 @@ query_view(DbName, DDoc, View) ->

 query_view(DbName, DDoc, View, Stale) ->
     {ok, Code, _Headers, Body} = test_request:get(
-        db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
-        ++ case Stale of
-            false -> [];
-            _ -> "?stale=" ++ atom_to_list(Stale)
-        end),
+        db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View ++
+            case Stale of
+                false -> [];
+                _ -> "?stale=" ++ atom_to_list(Stale)
+            end
+    ),
     ?assertEqual(200, Code),
     {Props} = jiffy:decode(Body),
     couch_util:get_value(<<"rows">>, Props, []).
@@ -512,7 +575,9 @@ check_rows_value(Rows, Value) ->
     lists:foreach(
         fun({Row}) ->
             ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
-        end, Rows).
+        end,
+        Rows
+    ).

 view_cleanup(DbName) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
@@ -531,8 +596,12 @@ count_users(DbName) ->
 count_index_files(DbName) ->
     % call server to fetch the index files
     RootDir = config:get("couchdb", "view_index_dir"),
-    length(filelib:wildcard(RootDir ++ "/." ++
-        binary_to_list(DbName) ++ "_design"++"/mrview/*")).
+    length(
+        filelib:wildcard(
+            RootDir ++ "/." ++
+                binary_to_list(DbName) ++ "_design" ++ "/mrview/*"
+        )
+    ).

 has_doc(DocId1, Rows) ->
     DocId = iolist_to_binary(DocId1),
@@ -542,10 +611,11 @@ backup_db_file(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     try
         SrcPath = couch_db:get_filepath(Db),
-        Src = if
-            is_list(SrcPath) -> SrcPath;
-            true -> binary_to_list(SrcPath)
-        end,
+        Src =
+            if
+                is_list(SrcPath) -> SrcPath;
+                true -> binary_to_list(SrcPath)
+            end,
         ok = copy_tree(Src, Src ++ ".backup")
     after
         couch_db:close(Db)
@@ -559,17 +629,21 @@ restore_backup_db_file(DbName) ->
     exit(DbPid, shutdown),
     ok = copy_tree(Src ++ ".backup", Src),

-    test_util:wait(fun() ->
-        case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
-            {ok, WaitDb} ->
-                case couch_db:get_pid(WaitDb) == DbPid of
-                    true -> wait;
-                    false -> ok
-                end;
-            Else ->
-                Else
-        end
-    end, ?TIMEOUT, ?DELAY).
+    test_util:wait(
+        fun() ->
+            case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
+                {ok, WaitDb} ->
+                    case couch_db:get_pid(WaitDb) == DbPid of
+                        true -> wait;
+                        false -> ok
+                    end;
+                Else ->
+                    Else
+            end
+        end,
+        ?TIMEOUT,
+        ?DELAY
+    ).

 compact_db(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
@@ -578,20 +652,23 @@ compact_db(DbName) ->
     wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).

 wait_db_compact_done(_DbName, 0) ->
-    erlang:error({assertion_failed,
-        [{module, ?MODULE},
-        {line, ?LINE},
-        {reason, "DB compaction failed to finish"}]});
+    erlang:error(
+        {assertion_failed, [
+            {module, ?MODULE},
+            {line, ?LINE},
+            {reason, "DB compaction failed to finish"}
+        ]}
+    );
 wait_db_compact_done(DbName, N) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     ok = couch_db:close(Db),
     CompactorPid = couch_db:get_compactor_pid(Db),
     case is_pid(CompactorPid) of
-    false ->
-        ok;
-    true ->
-        ok = timer:sleep(?DELAY),
-        wait_db_compact_done(DbName, N - 1)
+        false ->
+            ok;
+        true ->
+            ok = timer:sleep(?DELAY),
+            wait_db_compact_done(DbName, N - 1)
     end.

 compact_view_group(DbName, DDocId) when is_list(DDocId) ->
@@ -601,13 +678,17 @@ compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
     wait_view_compact_done(DbName, DDocId, 10).

 wait_view_compact_done(_DbName, _DDocId, 0) ->
-    erlang:error({assertion_failed,
-        [{module, ?MODULE},
-        {line, ?LINE},
-        {reason, "DB compaction failed to finish"}]});
+    erlang:error(
+        {assertion_failed, [
+            {module, ?MODULE},
+            {line, ?LINE},
+            {reason, "DB compaction failed to finish"}
+        ]}
+    );
 wait_view_compact_done(DbName, DDocId, N) ->
     {ok, Code, _Headers, Body} = test_request:get(
-        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
+        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"
+    ),
     ?assertEqual(200, Code),
     {Info} = jiffy:decode(Body),
     {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
@@ -628,13 +709,16 @@ read_header(File) ->

 stop_indexer(StopFun, Pid, Line, Reason) ->
     case test_util:stop_sync(Pid, StopFun) of
-    timeout ->
-        erlang:error(
-            {assertion_failed,
-                [{module, ?MODULE}, {line, Line},
-                {reason, Reason}]});
-    ok ->
-        ok
+        timeout ->
+            erlang:error(
+                {assertion_failed, [
+                    {module, ?MODULE},
+                    {line, Line},
+                    {reason, Reason}
+                ]}
+            );
+        ok ->
+            ok
     end.

 wait_indexer(IndexerPid) ->
diff --git a/src/couch/test/eunit/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl
index 4392aafac..92964bb74 100644
--- a/src/couch/test/eunit/global_changes_tests.erl
+++ b/src/couch/test/eunit/global_changes_tests.erl
@@ -35,7 +35,7 @@ http_create_db(Name) ->
     {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
     true = lists:member(Status, [201, 202]),
     ok.
-    
+
 http_delete_db(Name) ->
     {ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]),
     true = lists:member(Status, [200, 202]),
@@ -75,7 +75,8 @@ check_response() ->
         "Check response",
         {
             foreach,
-            fun setup/0, fun teardown/1,
+            fun setup/0,
+            fun teardown/1,
             [
                 fun should_return_correct_response_on_create/1,
                 fun should_return_correct_response_on_update/1
@@ -105,9 +106,11 @@ should_return_correct_response_on_update({Host, DbName}) ->
 create_doc(Host, DbName, Id) ->
     Headers = [?AUTH],
     Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
-    Body = jiffy:encode({[
-        {key, "value"}
-    ]}),
+    Body = jiffy:encode(
+        {[
+            {key, "value"}
+        ]}
+    ),
     {ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body),
     ?assert(Status =:= 201 orelse Status =:= 202),
     timer:sleep(1000),
@@ -118,10 +121,12 @@ update_doc(Host, DbName, Id, Value) ->
     Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
     {ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers),
     [Rev] = decode_response(BinBody, [<<"_rev">>]),
-    Body = jiffy:encode({[
-        {key, Value},
-        {'_rev', Rev}
-    ]}),
+    Body = jiffy:encode(
+        {[
+            {key, Value},
+            {'_rev', Rev}
+        ]}
+    ),
     {ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body),
     ?assert(Status =:= 201 orelse Status =:= 202),
     timer:sleep(1000),
@@ -145,7 +150,7 @@ decode_response(BinBody, ToDecode) ->

 add_admin(User, Pass) ->
     Hashed = couch_passwords:hash_admin_password(Pass),
-    config:set("admins", User, ?b2l(Hashed), _Persist=false).
+    config:set("admins", User, ?b2l(Hashed), _Persist = false).

 delete_admin(User) ->
     config:delete("admins", User, false).
diff --git a/src/couch/test/eunit/json_stream_parse_tests.erl b/src/couch/test/eunit/json_stream_parse_tests.erl
index e690d7728..ab26be725 100644
--- a/src/couch/test/eunit/json_stream_parse_tests.erl
+++ b/src/couch/test/eunit/json_stream_parse_tests.erl
@@ -14,83 +14,88 @@

 -include_lib("couch/include/couch_eunit.hrl").
--define(CASES,
-    [
-        {1, "1", "integer numeric literial"},
-        {3.1416, "3.14160", "float numeric literal"}, % text representation may truncate, trail zeroes
-        {-1, "-1", "negative integer numeric literal"},
-        {-3.1416, "-3.14160", "negative float numeric literal"},
-        {12.0e10, "1.20000e+11", "float literal in scientific notation"},
-        {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
-        {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
-        {10.0, "1.0e+01", "yet another float literal in scientific notation"},
-        {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
-        {10.0, "1e1", "yet another float literal in scientific notation"},
-        {<<"foo">>, "\"foo\"", "string literal"},
-        {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
-        {<<"">>, "\"\"", "empty string literal"},
-        {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
-        {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"",
-            "only white spaces string literal"},
-        {null, "null", "null literal"},
-        {true, "true", "true literal"},
-        {false, "false", "false literal"},
-        {<<"null">>, "\"null\"", "null string literal"},
-        {<<"true">>, "\"true\"", "true string literal"},
-        {<<"false">>, "\"false\"", "false string literal"},
-        {{[]}, "{}", "empty object literal"},
-        {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}",
-            "simple object literal"},
-        {{[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
-            "{\"foo\":\"bar\",\"baz\":123}", "another simple object literal"},
-        {[], "[]", "empty array literal"},
-        {[[]], "[[]]", "empty array literal inside a single element array literal"},
-        {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
-        {[1199344435545.0, 1], "[1199344435545.0,1]",
-            "another simple non-empty array literal"},
-        {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
-        {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}",
-            "object literal with an array valued property"},
-        {{[{<<"foo">>, {[{<<"bar">>, true}]}}]},
-            "{\"foo\":{\"bar\":true}}", "nested object literal"},
-        {{[{<<"foo">>, []}, {<<"bar">>, {[{<<"baz">>, true}]}},
-            {<<"alice">>, <<"bob">>}]},
-            "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
-            "complex object literal"},
-        {[-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
-            "[-123,\"foo\",{\"bar\":[]},null]",
-            "complex array literal"}
-    ]
-).
-
+-define(CASES, [
+    {1, "1", "integer numeric literial"},
+    % text representation may truncate, trail zeroes
+    {3.1416, "3.14160", "float numeric literal"},
+    {-1, "-1", "negative integer numeric literal"},
+    {-3.1416, "-3.14160", "negative float numeric literal"},
+    {12.0e10, "1.20000e+11", "float literal in scientific notation"},
+    {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
+    {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
+    {10.0, "1.0e+01", "yet another float literal in scientific notation"},
+    {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
+    {10.0, "1e1", "yet another float literal in scientific notation"},
+    {<<"foo">>, "\"foo\"", "string literal"},
+    {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
+    {<<"">>, "\"\"", "empty string literal"},
+    {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
+    {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"", "only white spaces string literal"},
+    {null, "null", "null literal"},
+    {true, "true", "true literal"},
+    {false, "false", "false literal"},
+    {<<"null">>, "\"null\"", "null string literal"},
+    {<<"true">>, "\"true\"", "true string literal"},
+    {<<"false">>, "\"false\"", "false string literal"},
+    {{[]}, "{}", "empty object literal"},
+    {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}", "simple object literal"},
+    {
+        {[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
+        "{\"foo\":\"bar\",\"baz\":123}",
+        "another simple object literal"
+    },
+    {[], "[]", "empty array literal"},
+    {[[]], "[[]]", "empty array literal inside a single element array literal"},
+    {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
+    {[1199344435545.0, 1], "[1199344435545.0,1]", "another simple non-empty array literal"},
+    {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
+    {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}", "object literal with an array valued property"},
+    {{[{<<"foo">>, {[{<<"bar">>, true}]}}]}, "{\"foo\":{\"bar\":true}}", "nested object literal"},
+    {
+        {[
+            {<<"foo">>, []},
+            {<<"bar">>, {[{<<"baz">>, true}]}},
+            {<<"alice">>, <<"bob">>}
+        ]},
+        "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
+        "complex object literal"
+    },
+    {
+        [-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
+        "[-123,\"foo\",{\"bar\":[]},null]",
+        "complex array literal"
+    }
+]).

 raw_json_input_test_() ->
     Tests = lists:map(
         fun({EJson, JsonString, Desc}) ->
-            {Desc,
-                ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
+            {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
-        end, ?CASES),
+        end,
+        ?CASES
+    ),
     {"Tests with raw JSON string as the input", Tests}.

 one_byte_data_fun_test_() ->
     Tests = lists:map(
         fun({EJson, JsonString, Desc}) ->
             DataFun = fun() -> single_byte_data_fun(JsonString) end,
-            {Desc,
-                ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+            {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
-        end, ?CASES),
+        end,
+        ?CASES
+    ),
     {"Tests with a 1 byte output data function as the input", Tests}.

 test_multiple_bytes_data_fun_test_() ->
     Tests = lists:map(
         fun({EJson, JsonString, Desc}) ->
             DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
-            {Desc,
-                ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+            {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
-        end, ?CASES),
+        end,
+        ?CASES
+    ),
     {"Tests with a multiple bytes output data function as the input", Tests}.

-
 %% Test for equivalence of Erlang terms.
 %% Due to arbitrary order of construction, equivalent objects might
 %% compare unequal as erlang terms, so we need to carefully recurse
@@ -120,7 +125,8 @@ equiv_object(Props1, Props2) ->
         fun({{K1, V1}, {K2, V2}}) ->
             equiv(K1, K2) andalso equiv(V1, V2)
         end,
-        Pairs).
+        Pairs
+    ).

 %% Recursively compare tuple elements for equivalence.
 equiv_list([], []) ->
@@ -147,5 +153,5 @@ split(L, N) ->

 take(0, L, Acc) ->
     {lists:reverse(Acc), L};
-take(N, [H|L], Acc) ->
+take(N, [H | L], Acc) ->
     take(N - 1, L, [H | Acc]).
diff --git a/src/couch/test/eunit/test_web.erl b/src/couch/test/eunit/test_web.erl
index b1b3e65c9..8998dad52 100644
--- a/src/couch/test/eunit/test_web.erl
+++ b/src/couch/test/eunit/test_web.erl
@@ -73,18 +73,18 @@ terminate(_Reason, _State) ->
 stop() ->
     mochiweb_http:stop(?SERVER).

-
 handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
-    Resp2 = case (catch State(Req)) of
-        {ok, Resp} ->
-            {reply, {ok, Resp}, was_ok};
-        {raw, Resp} ->
-            {reply, {raw, Resp}, was_ok};
-        {chunked, Resp} ->
-            {reply, {chunked, Resp}, was_ok};
-        Error ->
-            {reply, {error, Error}, not_ok}
-    end,
+    Resp2 =
+        case (catch State(Req)) of
+            {ok, Resp} ->
+                {reply, {ok, Resp}, was_ok};
+            {raw, Resp} ->
+                {reply, {raw, Resp}, was_ok};
+            {chunked, Resp} ->
+                {reply, {chunked, Resp}, was_ok};
+            Error ->
+                {reply, {error, Error}, not_ok}
+        end,
     Req:cleanup(),
     Resp2;
 handle_call({check_request, _Req}, _From, _State) ->