summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAaron Conole <aconole@redhat.com>2021-05-21 13:59:05 -0400
committerIlya Maximets <i.maximets@ovn.org>2021-06-15 10:46:04 +0200
commit64c3133c01cbbbf69ecf614a9a532f4306bea8d1 (patch)
treefb54ed23d1e4561d1dd8e01d3a6f781fd2b55b99
parentfe7bd42d6cad0cb1673019a9db01403461dbb7e7 (diff)
downloadopenvswitch-64c3133c01cbbbf69ecf614a9a532f4306bea8d1.tar.gz
ipf: Fix a use-after-free error, and remove the 'do_not_steal' flag.
As reported by Wang Liang, the way packets are passed to the ipf module doesn't allow for use later on in reassembly. Such packets may get released anyway, such as during cleanup of tx processing. Because the ipf module lacks a way of forcing the dp_packet to be retained, it will later reuse the packet. Instead, just clone the packet and let the ipf queue own the copy until the queue is destroyed. After this change, there are no more in-tree users of the batch 'do_not_steal' flag. Thus, we remove it as well. Fixes: 4ea96698f667 ("Userspace datapath: Add fragmentation handling.") Fixes: 0b3ff31d35f5 ("dp-packet: Add 'do_not_steal' packet batch flag.") Reported-at: https://mail.openvswitch.org/pipermail/ovs-dev/2021-April/382098.html Reported-by: Wang Liang <wangliangrt@didiglobal.com> Signed-off-by: Aaron Conole <aconole@redhat.com> Co-authored-by: Wang Liang <wangliangrt@didiglobal.com> Signed-off-by: Wang Liang <wangliangrt@didiglobal.com> Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
-rw-r--r--lib/dp-packet.h2
-rw-r--r--lib/dpif-netdev.c1
-rw-r--r--lib/ipf.c19
3 files changed, 6 insertions, 16 deletions
diff --git a/lib/dp-packet.h b/lib/dp-packet.h
index 45655af46..dc8b773fe 100644
--- a/lib/dp-packet.h
+++ b/lib/dp-packet.h
@@ -950,7 +950,6 @@ enum { NETDEV_MAX_BURST = 32 }; /* Maximum number packets in a batch. */
struct dp_packet_batch {
size_t count;
bool trunc; /* true if the batch needs truncate. */
- bool do_not_steal; /* Indicate that the packets should not be stolen. */
struct dp_packet *packets[NETDEV_MAX_BURST];
};
@@ -959,7 +958,6 @@ dp_packet_batch_init(struct dp_packet_batch *batch)
{
batch->count = 0;
batch->trunc = false;
- batch->do_not_steal = false;
}
static inline void
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index dd5664d79..a73c4469d 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -3934,7 +3934,6 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
}
dp_packet_batch_init_packet(&pp, execute->packet);
- pp.do_not_steal = true;
dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
execute->actions, execute->actions_len);
dp_netdev_pmd_flush_output_packets(pmd, true);
diff --git a/lib/ipf.c b/lib/ipf.c
index c20bcc0b3..9c83f1913 100644
--- a/lib/ipf.c
+++ b/lib/ipf.c
@@ -93,7 +93,6 @@ struct ipf_frag {
struct dp_packet *pkt;
uint16_t start_data_byte;
uint16_t end_data_byte;
- bool dnsteal; /* 'do not steal': if true, ipf should not free packet. */
};
/* The key for a collection of fragments potentially making up an unfragmented
@@ -795,8 +794,7 @@ ipf_is_frag_duped(const struct ipf_frag *frag_list, int last_inuse_idx,
static bool
ipf_process_frag(struct ipf *ipf, struct ipf_list *ipf_list,
struct dp_packet *pkt, uint16_t start_data_byte,
- uint16_t end_data_byte, bool ff, bool lf, bool v6,
- bool dnsteal)
+ uint16_t end_data_byte, bool ff, bool lf, bool v6)
OVS_REQUIRES(ipf->ipf_lock)
{
bool duped_frag = ipf_is_frag_duped(ipf_list->frag_list,
@@ -811,10 +809,9 @@ ipf_process_frag(struct ipf *ipf, struct ipf_list *ipf_list,
* recommend not setting the mempool number of buffers too low
* and also clamp the number of fragments. */
struct ipf_frag *frag = &ipf_list->frag_list[last_inuse_idx + 1];
- frag->pkt = pkt;
+ frag->pkt = dp_packet_clone(pkt);
frag->start_data_byte = start_data_byte;
frag->end_data_byte = end_data_byte;
- frag->dnsteal = dnsteal;
ipf_list->last_inuse_idx++;
atomic_count_inc(&ipf->nfrag);
ipf_count(ipf, v6, IPF_NFRAGS_ACCEPTED);
@@ -851,8 +848,7 @@ ipf_list_init(struct ipf_list *ipf_list, struct ipf_list_key *key,
* to a list of fragemnts. */
static bool
ipf_handle_frag(struct ipf *ipf, struct dp_packet *pkt, ovs_be16 dl_type,
- uint16_t zone, long long now, uint32_t hash_basis,
- bool dnsteal)
+ uint16_t zone, long long now, uint32_t hash_basis)
OVS_REQUIRES(ipf->ipf_lock)
{
struct ipf_list_key key;
@@ -921,7 +917,7 @@ ipf_handle_frag(struct ipf *ipf, struct dp_packet *pkt, ovs_be16 dl_type,
}
return ipf_process_frag(ipf, ipf_list, pkt, start_data_byte,
- end_data_byte, ff, lf, v6, dnsteal);
+ end_data_byte, ff, lf, v6);
}
/* Filters out fragments from a batch of fragments and adjust the batch. */
@@ -942,8 +938,7 @@ ipf_extract_frags_from_batch(struct ipf *ipf, struct dp_packet_batch *pb,
ipf_is_valid_v6_frag(ipf, pkt)))) {
ovs_mutex_lock(&ipf->ipf_lock);
- if (!ipf_handle_frag(ipf, pkt, dl_type, zone, now, hash_basis,
- pb->do_not_steal)) {
+ if (!ipf_handle_frag(ipf, pkt, dl_type, zone, now, hash_basis)) {
dp_packet_batch_refill(pb, pkt, pb_idx);
}
ovs_mutex_unlock(&ipf->ipf_lock);
@@ -1338,9 +1333,7 @@ ipf_destroy(struct ipf *ipf)
while (ipf_list->last_sent_idx < ipf_list->last_inuse_idx) {
struct dp_packet *pkt
= ipf_list->frag_list[ipf_list->last_sent_idx + 1].pkt;
- if (!ipf_list->frag_list[ipf_list->last_sent_idx + 1].dnsteal) {
- dp_packet_delete(pkt);
- }
+ dp_packet_delete(pkt);
atomic_count_dec(&ipf->nfrag);
ipf_list->last_sent_idx++;
}