/* 
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/uaccess.h>

#ifndef GUARD
# define GUARD UA_KEEP
#endif

/*
 * Copy @n bytes from hypervisor space (@from) to guest space (@to).
 * Returns the number of bytes that could NOT be copied; 0 on success.
 *
 * Short copies (up to 2*BYTES_PER_LONG-1 bytes) go straight to a byte
 * copy.  Longer ones are done in three phases: a byte-wise head to
 * align 'to' to a long boundary, a word-wise bulk copy, and a byte-wise
 * tail.  Faults on guest accesses are recovered via the exception-table
 * fixups, which reconstruct the not-yet-copied byte count in %[cnt].
 */
unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
{
    unsigned dummy;

    stac(); /* permit accesses to guest (user) mappings */
    asm volatile (
        GUARD(
        /* In UA_KEEP builds, mask the guest pointer before any access. */
        "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
        )
        /* Small copy? Skip alignment/word phases entirely. */
        "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
        "    jbe  1f\n"
        /* Head length = (-to) & (BYTES_PER_LONG-1) bytes, into %[cnt]. */
        "    mov  %k[to], %[cnt]\n"
        "    neg  %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[cnt]\n"
        /* %[aux] starts as n (tied input below); deduct the head. */
        "    sub  %[cnt], %[aux]\n"
        "4:  rep movsb\n" /* make 'to' address aligned */
        /* Split remainder: word count into %[cnt], tail bytes in %[aux]. */
        "    mov  %[aux], %[cnt]\n"
        "    shr  $"STR(LONG_BYTEORDER)", %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[aux]\n"
        "    .align 2,0x90\n"
        "0:  rep movs"__OS"\n" /* as many words as possible... */
        "    mov  %[aux],%[cnt]\n"
        "1:  rep movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        /* Fault in head copy: remaining = head-left (%[cnt]) + rest (%[aux]). */
        "5:  add %[aux], %[cnt]\n"
        "    jmp 2b\n"
        /* Fault in word copy: remaining = words-left * size + tail bytes. */
        "3:  lea (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n"
        "    jmp 2b\n"
        ".previous\n"
        _ASM_EXTABLE(4b, 5b)
        _ASM_EXTABLE(0b, 3b)
        _ASM_EXTABLE(1b, 2b) /* tail fault: %[cnt] already holds remainder */
        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
          [aux] "=&r" (dummy)
          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
        : "[aux]" (n) /* matching constraint: pre-load %[aux] with n */
        : "memory" );
    clac(); /* re-enable SMAP protection */

    return n;
}

/*
 * Copy @n bytes from guest space (@from) to hypervisor space (@to).
 * Returns the number of bytes that could NOT be copied; 0 on success.
 *
 * Same three-phase structure as copy_to_guest_ll() (byte head to align
 * 'to', word bulk, byte tail).  The crucial difference is the fixup
 * path: on a fault, the not-yet-written part of the destination is
 * zero-filled with "rep stosb", so callers never see stale hypervisor
 * stack/heap contents in a partially filled buffer.
 */
unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
{
    unsigned dummy;

    stac(); /* permit accesses to guest (user) mappings */
    asm volatile (
        GUARD(
        /* In UA_KEEP builds, mask the guest pointer before any access. */
        "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
        )
        /* Small copy? Skip alignment/word phases entirely. */
        "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
        "    jbe  1f\n"
        /* Head length = (-to) & (BYTES_PER_LONG-1) bytes, into %[cnt]. */
        "    mov  %k[to], %[cnt]\n"
        "    neg  %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[cnt]\n"
        /* %[aux] starts as n (tied input below); deduct the head. */
        "    sub  %[cnt], %[aux]\n"
        "4:  rep movsb\n" /* make 'to' address aligned */
        /* Split remainder: word count into %[cnt], tail bytes in %[aux]. */
        "    mov  %[aux],%[cnt]\n"
        "    shr  $"STR(LONG_BYTEORDER)", %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[aux]\n"
        "    .align 2,0x90\n"
        "0:  rep movs"__OS"\n" /* as many words as possible... */
        "    mov  %[aux], %[cnt]\n"
        "1:  rep movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        /* Fault in head copy: remaining = head-left (%[cnt]) + rest (%[aux]). */
        "5:  add  %[aux], %[cnt]\n"
        "    jmp 6f\n"
        /* Fault in word copy: remaining = words-left * size + tail bytes. */
        "3:  lea  (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n"
        /*
         * Common tail: zero the remaining %[cnt] destination bytes.
         * %[from] (dead after a fault) stashes the remainder; %eax is
         * preserved via xchg with %[aux] around the "rep stosb".
         */
        "6:  mov  %[cnt], %k[from]\n"
        "    xchg %%eax, %[aux]\n"
        "    xor  %%eax, %%eax\n"
        "    rep stosb\n"
        "    xchg %[aux], %%eax\n"
        "    mov  %k[from], %[cnt]\n"
        "    jmp 2b\n"
        ".previous\n"
        _ASM_EXTABLE(4b, 5b)
        _ASM_EXTABLE(0b, 3b)
        _ASM_EXTABLE(1b, 6b) /* tail fault: %[cnt] already holds remainder */
        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
          [aux] "=&r" (dummy)
          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
        : "[aux]" (n) /* matching constraint: pre-load %[aux] with n */
        : "memory" );
    clac(); /* re-enable SMAP protection */

    return n;
}

#if GUARD(1) + 0

/**
 * copy_to_guest_pv: - Copy a block of data into PV guest space.
 * @to:   Destination address, in PV guest space.
 * @from: Source address, in hypervisor space.
 * @n:    Number of bytes to copy.
 *
 * Copy data from hypervisor space to PV guest space, after validating
 * the destination range with access_ok().
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned int copy_to_guest_pv(void __user *to, const void *from, unsigned int n)
{
    /* Reject ranges outside the guest-accessible window up front. */
    if ( !access_ok(to, n) )
        return n;

    return __copy_to_guest_pv(to, from, n);
}

/**
 * clear_guest_pv: - Zero a block of memory in PV guest space.
 * @to:   Destination address, in PV guest space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in PV guest space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned int clear_guest_pv(void __user *to, unsigned int n)
{
    if ( access_ok(to, n) )
    {
        long dummy;

        stac(); /* permit accesses to guest (user) mappings */
        asm volatile (
            /* Mask the guest pointer before any access. */
            "    guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
            /* Store whole longs first (%[cnt] pre-loaded with n / size). */
            "0:  rep stos"__OS"\n"
            /* Then the sub-long remainder as bytes. */
            "    mov  %[bytes], %[cnt]\n"
            "1:  rep stosb\n"
            "2:\n"
            ".section .fixup,\"ax\"\n"
            /* Fault in long stores: remaining = longs-left * size + bytes. */
            "3:  lea  (%q[bytes], %q[longs], "STR(BYTES_PER_LONG)"), %[cnt]\n"
            "    jmp  2b\n"
            ".previous\n"
            _ASM_EXTABLE(0b,3b)
            _ASM_EXTABLE(1b,2b) /* byte-store fault: %[cnt] is the remainder */
            : [cnt] "=&c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
              [scratch2] "=&r" (dummy)
            : [bytes] "r" (n & (BYTES_PER_LONG - 1)),
              [longs] "0" (n / BYTES_PER_LONG), "a" (0) /* %eax = fill value 0 */ );
        clac(); /* re-enable SMAP protection */
    }

    return n;
}

/**
 * copy_from_guest_pv: - Copy a block of data from PV guest space.
 * @to:   Destination address, in hypervisor space.
 * @from: Source address, in PV guest space.
 * @n:    Number of bytes to copy.
 *
 * Copy data from PV guest space to hypervisor space, after validating
 * the source range with access_ok().
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned int copy_from_guest_pv(void *to, const void __user *from,
                                unsigned int n)
{
    if ( !access_ok(from, n) )
    {
        /* Invalid source range: zero-fill the whole destination. */
        memset(to, 0, n);
        return n;
    }

    return __copy_from_guest_pv(to, from, n);
}

/*
 * Build the "unsafe" variants by re-including this file once more:
 * GUARD now expands to UA_DROP (eliding the guest_access_mask_ptr
 * pointer masking) and the function names are remapped, yielding
 * copy_to_unsafe_ll() / copy_from_unsafe_ll().  The second pass skips
 * this section because the surrounding "#if GUARD(1) + 0" is then false.
 */
# undef GUARD
# define GUARD UA_DROP
# define copy_to_guest_ll copy_to_unsafe_ll
# define copy_from_guest_ll copy_from_unsafe_ll
# undef __user
# define __user
# include __FILE__

#endif /* GUARD(1) */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */