Searched refs:segment (Results 1 - 200 of 1006) sorted by relevance


/linux-4.1.27/drivers/scsi/
H A Dlibiscsi_tcp.c71 struct iscsi_segment *segment);
88 * @segment: the buffer object
92 * This function sets up the segment so that subsequent
97 iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, iscsi_tcp_segment_init_sg() argument
100 segment->sg = sg; iscsi_tcp_segment_init_sg()
101 segment->sg_offset = offset; iscsi_tcp_segment_init_sg()
102 segment->size = min(sg->length - offset, iscsi_tcp_segment_init_sg()
103 segment->total_size - segment->total_copied); iscsi_tcp_segment_init_sg()
104 segment->data = NULL; iscsi_tcp_segment_init_sg()
109 * @segment: iscsi_segment
116 static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) iscsi_tcp_segment_map() argument
120 if (segment->data != NULL || !segment->sg) iscsi_tcp_segment_map()
123 sg = segment->sg; iscsi_tcp_segment_map()
124 BUG_ON(segment->sg_mapped); iscsi_tcp_segment_map()
137 segment->atomic_mapped = true; iscsi_tcp_segment_map()
138 segment->sg_mapped = kmap_atomic(sg_page(sg)); iscsi_tcp_segment_map()
140 segment->atomic_mapped = false; iscsi_tcp_segment_map()
142 segment->sg_mapped = kmap(sg_page(sg)); iscsi_tcp_segment_map()
145 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; iscsi_tcp_segment_map()
148 void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) iscsi_tcp_segment_unmap() argument
150 if (segment->sg_mapped) { iscsi_tcp_segment_unmap()
151 if (segment->atomic_mapped) iscsi_tcp_segment_unmap()
152 kunmap_atomic(segment->sg_mapped); iscsi_tcp_segment_unmap()
154 kunmap(sg_page(segment->sg)); iscsi_tcp_segment_unmap()
155 segment->sg_mapped = NULL; iscsi_tcp_segment_unmap()
156 segment->data = NULL; iscsi_tcp_segment_unmap()
165 iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest) iscsi_tcp_segment_splice_digest() argument
167 segment->data = digest; iscsi_tcp_segment_splice_digest()
168 segment->digest_len = ISCSI_DIGEST_SIZE; iscsi_tcp_segment_splice_digest()
169 segment->total_size += ISCSI_DIGEST_SIZE; iscsi_tcp_segment_splice_digest()
170 segment->size = ISCSI_DIGEST_SIZE; iscsi_tcp_segment_splice_digest()
171 segment->copied = 0; iscsi_tcp_segment_splice_digest()
172 segment->sg = NULL; iscsi_tcp_segment_splice_digest()
173 segment->hash = NULL; iscsi_tcp_segment_splice_digest()
177 * iscsi_tcp_segment_done - check whether the segment is complete
179 * @segment: iscsi segment to check
183 * Check if we're done receiving this segment. If the receive
193 struct iscsi_segment *segment, int recv, iscsi_tcp_segment_done()
200 segment->copied, copied, segment->size, iscsi_tcp_segment_done()
202 if (segment->hash && copied) { iscsi_tcp_segment_done()
204 * If a segment is kmap'd we must unmap it before sending iscsi_tcp_segment_done()
207 iscsi_tcp_segment_unmap(segment); iscsi_tcp_segment_done()
209 if (!segment->data) { iscsi_tcp_segment_done()
211 sg_set_page(&sg, sg_page(segment->sg), copied, iscsi_tcp_segment_done()
212 segment->copied + segment->sg_offset + iscsi_tcp_segment_done()
213 segment->sg->offset); iscsi_tcp_segment_done()
215 sg_init_one(&sg, segment->data + segment->copied, iscsi_tcp_segment_done()
217 crypto_hash_update(segment->hash, &sg, copied); iscsi_tcp_segment_done()
220 segment->copied += copied; iscsi_tcp_segment_done()
221 if (segment->copied < segment->size) { iscsi_tcp_segment_done()
222 iscsi_tcp_segment_map(segment, recv); iscsi_tcp_segment_done()
226 segment->total_copied += segment->copied; iscsi_tcp_segment_done()
227 segment->copied = 0; iscsi_tcp_segment_done()
228 segment->size = 0; iscsi_tcp_segment_done()
231 iscsi_tcp_segment_unmap(segment); iscsi_tcp_segment_done()
235 segment->total_copied, segment->total_size); iscsi_tcp_segment_done()
236 if (segment->total_copied < segment->total_size) { iscsi_tcp_segment_done()
238 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), iscsi_tcp_segment_done()
240 iscsi_tcp_segment_map(segment, recv); iscsi_tcp_segment_done()
241 BUG_ON(segment->size == 0); iscsi_tcp_segment_done()
247 pad = iscsi_padding(segment->total_copied); iscsi_tcp_segment_done()
251 segment->total_size += pad; iscsi_tcp_segment_done()
252 segment->size = pad; iscsi_tcp_segment_done()
253 segment->data = segment->padbuf; iscsi_tcp_segment_done()
262 if (segment->hash) { iscsi_tcp_segment_done()
263 crypto_hash_final(segment->hash, segment->digest); iscsi_tcp_segment_done()
264 iscsi_tcp_segment_splice_digest(segment, iscsi_tcp_segment_done()
265 recv ? segment->recv_digest : segment->digest); iscsi_tcp_segment_done()
274 * iscsi_tcp_segment_recv - copy data to segment
276 * @segment: the buffer to copy to
292 struct iscsi_segment *segment, const void *ptr, iscsi_tcp_segment_recv()
297 while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) { iscsi_tcp_segment_recv()
304 copy = min(len - copied, segment->size - segment->copied); iscsi_tcp_segment_recv()
306 memcpy(segment->data + segment->copied, ptr + copied, copy); iscsi_tcp_segment_recv()
325 struct iscsi_segment *segment) iscsi_tcp_dgst_verify()
327 if (!segment->digest_len) iscsi_tcp_dgst_verify()
330 if (memcmp(segment->recv_digest, segment->digest, iscsi_tcp_dgst_verify()
331 segment->digest_len)) { iscsi_tcp_dgst_verify()
340 * Helper function to set up segment buffer
343 __iscsi_segment_init(struct iscsi_segment *segment, size_t size, __iscsi_segment_init() argument
346 memset(segment, 0, sizeof(*segment)); __iscsi_segment_init()
347 segment->total_size = size; __iscsi_segment_init()
348 segment->done = done; __iscsi_segment_init()
351 segment->hash = hash; __iscsi_segment_init()
357 iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, iscsi_segment_init_linear() argument
361 __iscsi_segment_init(segment, size, done, hash); iscsi_segment_init_linear()
362 segment->data = data; iscsi_segment_init_linear()
363 segment->size = size; iscsi_segment_init_linear()
368 iscsi_segment_seek_sg(struct iscsi_segment *segment, iscsi_segment_seek_sg() argument
376 __iscsi_segment_init(segment, size, done, hash); for_each_sg()
379 iscsi_tcp_segment_init_sg(segment, sg, offset); for_each_sg()
390 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
402 iscsi_segment_init_linear(&tcp_conn->in.segment, iscsi_tcp_hdr_recv_prep()
413 struct iscsi_segment *segment) iscsi_tcp_data_recv_done()
418 if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) iscsi_tcp_data_recv_done()
440 iscsi_segment_init_linear(&tcp_conn->in.segment, iscsi_tcp_data_recv_prep()
614 struct iscsi_segment *segment) iscsi_tcp_process_data_in()
620 if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) iscsi_tcp_process_data_in()
709 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, iscsi_tcp_hdr_dissect()
803 struct iscsi_segment *segment) iscsi_tcp_hdr_recv_done()
813 if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) { iscsi_tcp_hdr_recv_done()
823 segment->total_size += ahslen; iscsi_tcp_hdr_recv_done()
824 segment->size += ahslen; iscsi_tcp_hdr_recv_done()
833 if (segment->digest_len == 0) { iscsi_tcp_hdr_recv_done()
836 * splice it in so we can increment the skb/segment iscsi_tcp_hdr_recv_done()
837 * counters in preparation for the data segment. iscsi_tcp_hdr_recv_done()
839 iscsi_tcp_segment_splice_digest(segment, iscsi_tcp_hdr_recv_done()
840 segment->recv_digest); iscsi_tcp_hdr_recv_done()
845 segment->total_copied - ISCSI_DIGEST_SIZE, iscsi_tcp_hdr_recv_done()
846 segment->digest); iscsi_tcp_hdr_recv_done()
848 if (!iscsi_tcp_dgst_verify(tcp_conn, segment)) iscsi_tcp_hdr_recv_done()
865 return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done; iscsi_tcp_recv_segment_is_hdr()
872 * @skb: network buffer with header and/or data segment
883 struct iscsi_segment *segment = &tcp_conn->in.segment; iscsi_tcp_recv_skb() local
903 segment->total_copied = segment->total_size; iscsi_tcp_recv_skb()
919 BUG_ON(segment->copied >= segment->size); iscsi_tcp_recv_skb()
923 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail); iscsi_tcp_recv_skb()
927 if (segment->total_copied >= segment->total_size) { iscsi_tcp_recv_skb()
935 ISCSI_DBG_TCP(conn, "segment done\n"); iscsi_tcp_recv_skb()
936 rc = segment->done(tcp_conn, segment); iscsi_tcp_recv_skb()
943 /* The done() function sets up the next segment. */ iscsi_tcp_recv_skb()
192 iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, int recv, unsigned copied) iscsi_tcp_segment_done() argument
291 iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, const void *ptr, unsigned int len) iscsi_tcp_segment_recv() argument
324 iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) iscsi_tcp_dgst_verify() argument
412 iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) iscsi_tcp_data_recv_done() argument
613 iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) iscsi_tcp_process_data_in() argument
802 iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) iscsi_tcp_hdr_recv_done() argument
H A Dqla1280.h527 __le16 dseg_count; /* Data segment count. */
529 __le32 dseg_0_address; /* Data segment 0 address. */
530 __le32 dseg_0_length; /* Data segment 0 length. */
531 __le32 dseg_1_address; /* Data segment 1 address. */
532 __le32 dseg_1_length; /* Data segment 1 length. */
533 __le32 dseg_2_address; /* Data segment 2 address. */
534 __le32 dseg_2_length; /* Data segment 2 length. */
535 __le32 dseg_3_address; /* Data segment 3 address. */
536 __le32 dseg_3_length; /* Data segment 3 length. */
549 __le32 dseg_0_address; /* Data segment 0 address. */
550 __le32 dseg_0_length; /* Data segment 0 length. */
551 __le32 dseg_1_address; /* Data segment 1 address. */
552 __le32 dseg_1_length; /* Data segment 1 length. */
553 __le32 dseg_2_address; /* Data segment 2 address. */
554 __le32 dseg_2_length; /* Data segment 2 length. */
555 __le32 dseg_3_address; /* Data segment 3 address. */
556 __le32 dseg_3_length; /* Data segment 3 length. */
557 __le32 dseg_4_address; /* Data segment 4 address. */
558 __le32 dseg_4_length; /* Data segment 4 length. */
559 __le32 dseg_5_address; /* Data segment 5 address. */
560 __le32 dseg_5_length; /* Data segment 5 length. */
561 __le32 dseg_6_address; /* Data segment 6 address. */
562 __le32 dseg_6_length; /* Data segment 6 length. */
634 __le16 dseg_count; /* Data segment count. */
654 __le16 dseg_count; /* Data segment count. */
657 __le32 dseg_0_address[2]; /* Data segment 0 address. */
658 __le32 dseg_0_length; /* Data segment 0 length. */
659 __le32 dseg_1_address[2]; /* Data segment 1 address. */
660 __le32 dseg_1_length; /* Data segment 1 length. */
672 __le32 dseg_0_address[2]; /* Data segment 0 address. */
673 __le32 dseg_0_length; /* Data segment 0 length. */
674 __le32 dseg_1_address[2]; /* Data segment 1 address. */
675 __le32 dseg_1_length; /* Data segment 1 length. */
676 __le32 dseg_2_address[2]; /* Data segment 2 address. */
677 __le32 dseg_2_length; /* Data segment 2 length. */
678 __le32 dseg_3_address[2]; /* Data segment 3 address. */
679 __le32 dseg_3_length; /* Data segment 3 length. */
680 __le32 dseg_4_address[2]; /* Data segment 4 address. */
681 __le32 dseg_4_length; /* Data segment 4 length. */
829 __le16 dseg_count; /* Data segment count. */
830 __le32 dseg_0_address; /* Data segment 0 address. */
831 __le32 dseg_0_length; /* Data segment 0 length. */
832 __le32 dseg_1_address; /* Data segment 1 address. */
833 __le32 dseg_1_length; /* Data segment 1 length. */
834 __le32 dseg_2_address; /* Data segment 2 address. */
835 __le32 dseg_2_length; /* Data segment 2 length. */
836 __le32 dseg_3_address; /* Data segment 3 address. */
837 __le32 dseg_3_length; /* Data segment 3 length. */
862 __le16 dseg_count; /* Data segment count. */
863 __le32 dseg_0_address; /* Data segment 0 address. */
864 __le32 dseg_0_length; /* Data segment 0 length. */
865 __le32 dseg_1_address; /* Data segment 1 address. */
866 __le16 dseg_1_length; /* Data segment 1 length. */
892 __le16 dseg_count; /* Data segment count. */
894 __le32 dseg_0_address[2];/* Data segment 0 address. */
895 __le32 dseg_0_length; /* Data segment 0 length. */
896 __le32 dseg_1_address[2];/* Data segment 1 address. */
897 __le32 dseg_1_length; /* Data segment 1 length. */
922 __le16 dseg_count; /* Data segment count. */
H A Discsi_tcp.c156 iscsi_tcp_segment_unmap(&tcp_conn->in.segment); iscsi_sw_tcp_data_ready()
252 * iscsi_sw_tcp_xmit_segment - transmit segment
254 * @segment: the buffer to transmit iscsi_sw_tcp_xmit_segment()
261 * hash as it goes. When the entire segment has been transmitted,
265 struct iscsi_segment *segment) iscsi_sw_tcp_xmit_segment()
272 while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) { iscsi_sw_tcp_xmit_segment()
278 offset = segment->copied; iscsi_sw_tcp_xmit_segment()
279 copy = segment->size - offset; iscsi_sw_tcp_xmit_segment()
281 if (segment->total_copied + segment->size < segment->total_size) iscsi_sw_tcp_xmit_segment()
285 if (!segment->data) { iscsi_sw_tcp_xmit_segment()
286 sg = segment->sg; iscsi_sw_tcp_xmit_segment()
287 offset += segment->sg_offset + sg->offset; iscsi_sw_tcp_xmit_segment()
293 .iov_base = segment->data + offset, iscsi_sw_tcp_xmit_segment()
301 iscsi_tcp_segment_unmap(segment); iscsi_sw_tcp_xmit_segment()
316 struct iscsi_segment *segment = &tcp_sw_conn->out.segment; iscsi_sw_tcp_xmit() local
321 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); iscsi_sw_tcp_xmit()
337 if (segment->total_copied >= segment->total_size) { iscsi_sw_tcp_xmit()
338 if (segment->done != NULL) { iscsi_sw_tcp_xmit()
339 rc = segment->done(tcp_conn, segment); iscsi_sw_tcp_xmit()
366 struct iscsi_segment *segment = &tcp_sw_conn->out.segment; iscsi_sw_tcp_xmit_qlen() local
368 return segment->total_copied - segment->total_size; iscsi_sw_tcp_xmit_qlen()
396 * Simply copy the data_segment to the send segment, and return.
399 struct iscsi_segment *segment) iscsi_sw_tcp_send_hdr_done()
403 tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment; iscsi_sw_tcp_send_hdr_done()
405 "Header done. Next segment size %u total_size %u\n", iscsi_sw_tcp_send_hdr_done()
406 tcp_sw_conn->out.segment.size, iscsi_sw_tcp_send_hdr_done()
407 tcp_sw_conn->out.segment.total_size); iscsi_sw_tcp_send_hdr_done()
420 /* Clear the data segment - needs to be filled in by the iscsi_sw_tcp_send_hdr_prep()
441 iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen, iscsi_sw_tcp_send_hdr_prep()
264 iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) iscsi_sw_tcp_xmit_segment() argument
398 iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment) iscsi_sw_tcp_send_hdr_done() argument
H A Dvmw_pvscsi.h52 BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment
296 * s/g table segment, each s/g segment is entirely contained on a single
345 * table segment.
347 * - each segment of the s/g table contain a succession of struct
349 * - each segment is entirely contained on a single physical page of memory.
352 * * addr is the PA of the next s/g segment,
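The vmw_pvscsi.h comments above describe a chained scatter/gather layout: each s/g segment fits on one physical page, and an element flagged as a chain entry carries the address of the next segment instead of a data buffer. The sketch below is a standalone illustration of walking such a chain; struct sge and SGE_FLAG_CHAIN are hypothetical names, not the driver's actual definitions.

#include <stdio.h>
#include <stdint.h>

#define SGE_FLAG_CHAIN	0x1
#define SEG_ENTRIES	4

struct sge {
	uint64_t addr;		/* buffer address, or next segment if CHAIN */
	uint32_t length;
	uint32_t flags;
};

static void walk_sg(const struct sge *seg)
{
	while (seg) {
		const struct sge *next = NULL;

		for (int i = 0; i < SEG_ENTRIES; i++) {
			if (seg[i].flags & SGE_FLAG_CHAIN) {
				/* chain entry: jump to the next segment */
				next = (const struct sge *)(uintptr_t)seg[i].addr;
				break;
			}
			if (!seg[i].length)
				return;		/* end of list */
			printf("buf @%#llx len %u\n",
			       (unsigned long long)seg[i].addr, seg[i].length);
		}
		seg = next;
	}
}

int main(void)
{
	struct sge seg2[SEG_ENTRIES] = {
		{ 0x3000, 512, 0 }, { 0, 0, 0 },
	};
	struct sge seg1[SEG_ENTRIES] = {
		{ 0x1000, 4096, 0 }, { 0x2000, 4096, 0 }, { 0x2800, 1024, 0 },
		{ (uint64_t)(uintptr_t)seg2, 0, SGE_FLAG_CHAIN },
	};

	walk_sg(seg1);
	return 0;
}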
H A Discsi_tcp.h34 struct iscsi_segment segment; member in struct:iscsi_sw_tcp_send
/linux-4.1.27/fs/nilfs2/
H A DMakefile4 the_nilfs.o segbuf.o segment.o cpfile.o sufile.o \
H A Dsufile.h2 * sufile.h - NILFS segment usage file.
72 * nilfs_sufile_scrap - make a segment garbage
73 * @sufile: inode of segment usage file
74 * @segnum: segment number to be freed
82 * nilfs_sufile_free - free segment
83 * @sufile: inode of segment usage file
84 * @segnum: segment number to be freed
93 * @sufile: inode of segment usage file
94 * @segnumv: array of segment numbers
107 * @sufile: inode of segment usage file
108 * @segnumv: array of segment numbers
124 * nilfs_sufile_set_error - mark a segment as erroneous
125 * @sufile: inode of segment usage file
126 * @segnum: segment number
128 * Description: nilfs_sufile_set_error() marks the segment specified by
129 * @segnum as erroneous. The error segment will never be used again.
138 * %-EINVAL - Invalid segment usage number.
H A Dsegbuf.h32 * struct nilfs_segsum_info - On-memory segment summary
35 * @nblocks: Number of blocks included in the partial segment
37 * @sumbytes: Byte count of segment summary
42 * @next: Block number of the next full segment
61 * @sb_sum: On-memory segment summary
62 * @sb_segnum: Index number of the full segment
63 * @sb_nextnum: Index number of the next full segment
64 * @sb_fseg_start: Start block number of the full segment
65 * @sb_fseg_end: End block number of the full segment
66 * @sb_pseg_start: Disk block number of partial segment
67 * @sb_rest_blocks: Number of residual blocks in the current segment
68 * @sb_segsum_buffers: List of buffers for segment summaries
69 * @sb_payload_buffers: List of buffers for segment payload
H A Dsegment.h2 * segment.h - NILFS Segment constructor prototypes and definitions
42 * @ri_lseg_start_seq: Sequence value of the segment at ri_lsegs_start
44 * @ri_pseg_start: Block number of the last partial segment
45 * @ri_seq: Sequence number on the last partial segment
46 * @ri_segnum: Segment number on the last partial segment
47 * @ri_nextnum: Next segment number on the last partial segment
98 * @sc_freesegs: array of segment numbers to be freed
103 * @sc_segbufs: List of segment buffers
104 * @sc_write_logs: List of segment buffers to hold logs under writing
105 * @sc_segbuf_nblocks: Number of available blocks in segment buffers.
106 * @sc_curseg: Current segment buffer
108 * @sc_finfo_ptr: pointer to the current finfo struct in the segment summary
109 * @sc_binfo_ptr: pointer to the current binfo struct in the segment summary
112 * @sc_nblk_this_inc: Number of blocks included in the current logical segment
128 * @sc_lseg_stime: Start time of the latest logical segment
193 NILFS_SC_UNCLOSED, /* Logical segment is not closed */
194 NILFS_SC_SUPER_ROOT, /* The latest segment has a super root */
217 logical segment with a super root */
229 /* segment.c */
H A Dsufile.c2 * sufile.c - NILFS segment usage file.
37 * @allocmin: lower limit of allocatable segment range
38 * @allocmax: upper limit of allocatable segment range
43 __u64 allocmin; /* lower limit of allocatable segment range */
44 __u64 allocmax; /* upper limit of allocatable segment range */
131 * @sufile: inode of segment usage file
139 * nilfs_sufile_updatev - modify multiple segment usages at a time
140 * @sufile: inode of segment usage file
141 * @segnumv: array of segment numbers
150 * segment usage entry is contained. If @ndone is given, the number
161 * %-ENOENT - Given segment usage is in hole block (may be returned if
164 * %-EINVAL - Invalid segment usage number
185 "%s: invalid segment number: %llu\n", __func__, nilfs_sufile_updatev()
243 printk(KERN_WARNING "%s: invalid segment number: %llu\n", nilfs_sufile_update()
266 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
267 * @sufile: inode of segment usage file
268 * @start: minimum segment number of allocatable region (inclusive)
269 * @end: maximum segment number of allocatable region (inclusive)
274 * %-ERANGE - invalid segment region
295 * nilfs_sufile_alloc - allocate a segment
296 * @sufile: inode of segment usage file
297 * @segnump: pointer to segment number
299 * Description: nilfs_sufile_alloc() allocates a clean segment.
301 * Return Value: On success, 0 is returned and the segment number of the
302 * allocated segment is stored in the place pointed by @segnump. On error, one
309 * %-ENOSPC - No clean segment left.
374 /* found a clean segment */ nilfs_sufile_alloc()
419 printk(KERN_WARNING "%s: segment %llu must be clean\n", nilfs_sufile_do_cancel_free()
452 /* make the segment garbage */ nilfs_sufile_do_scrap()
476 printk(KERN_WARNING "%s: segment %llu is already clean\n", nilfs_sufile_do_free()
496 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
497 * @sufile: inode of segment usage file
498 * @segnum: segment number
515 * nilfs_sufile_set_segment_usage - set usage of a segment
516 * @sufile: inode of segment usage file
517 * @segnum: segment number
518 * @nblocks: number of live blocks in the segment
552 * nilfs_sufile_get_stat - get segment usage statistics
553 * @sufile: inode of segment usage file
554 * @stat: pointer to a structure of segment usage statistics
556 * Description: nilfs_sufile_get_stat() returns information about segment
559 * Return Value: On success, 0 is returned, and segment usage information is
626 * nilfs_sufile_truncate_range - truncate range of segment array
627 * @sufile: inode of segment usage file
628 * @start: start segment number (inclusive)
629 * @end: end segment number (inclusive)
731 * nilfs_sufile_resize - resize segment array
732 * @sufile: inode of segment usage file
799 * @sufile: inode of segment usage file
800 * @segnum: segment number to start looking
873 * nilfs_sufile_set_suinfo - sets segment usage info
874 * @sufile: inode of segment usage file
880 * segment usage accordingly. Only the fields indicated by the sup_flags
890 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
1005 * @sufile: inode of segment usage file
1014 * and start+len is rounded down. For each clean segment blkdev_issue_discard
1158 * @susize: size of a segment usage entry
1174 "NILFS: too large segment usage size: %zu bytes.\n", nilfs_sufile_read()
1179 "NILFS: too small segment usage size: %zu bytes.\n", nilfs_sufile_read()
H A Dthe_nilfs.h60 * @ns_seg_seq: segment sequence counter
61 * @ns_segnum: index number of the latest full segment.
62 * @ns_nextnum: index number of the full segment index to be used next
63 * @ns_pseg_offset: offset of next partial segment in the current full segment
65 * @ns_ctime: write time of the last segment
66 * @ns_nongc_ctime: write time of the last segment not for cleaner operation
68 * @ns_last_segment_lock: lock protecting fields for the latest segment
69 * @ns_last_pseg: start block number of the latest segment
70 * @ns_last_seq: sequence value of the latest segment
71 * @ns_last_cno: checkpoint number of the latest segment
94 * @ns_blocks_per_segment: number of blocks per segment
116 * - allocating a new full segment.
128 * Except for the period seeking checkpoint, code outside the segment
129 * constructor must lock a segment semaphore while accessing these
143 * The following fields hold information on the latest partial segment
349 /* terminate the current full segment (used in case of I/O-error) */ nilfs_terminate_segment()
355 /* move forward with a full segment */ nilfs_shift_to_next_segment()
H A Drecovery.c29 #include "segment.h"
64 "NILFS warning: I/O error on loading last segment\n"); nilfs_warn_segment_error()
80 "NILFS warning: Checksum error in segment payload\n"); nilfs_warn_segment_error()
84 "NILFS warning: Inconsistent segment\n"); nilfs_warn_segment_error()
88 "NILFS warning: No super root in the last segment\n"); nilfs_warn_segment_error()
190 * @sum: pointer to return segment summary structure
207 * @seg_seq: sequence number of segment
209 * @sum: segment summary struct
444 * Releasing the next segment of the latest super root. nilfs_prepare_segment_for_recovery()
445 * The next segment is invalidated by this recovery. nilfs_prepare_segment_for_recovery()
587 sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */ nilfs_do_roll_forward()
627 /* Found a valid partial segment; do recovery actions */ nilfs_do_roll_forward()
674 /* Looking to the next full segment */ nilfs_do_roll_forward()
801 * segment pointed by the superblock. It sets up struct the_nilfs through
807 * %-EINVAL - No valid segment found
819 sector_t seg_start, seg_end; /* range of full segment (block number) */ nilfs_search_super_root()
835 /* Calculate range of segment */ nilfs_search_super_root()
838 /* Read ahead segment */ nilfs_search_super_root()
864 /* A valid partial segment */ nilfs_search_super_root()
937 /* Looking to the next full segment */ nilfs_search_super_root()
H A Dsegment.c2 * segment.c - NILFS segment constructor.
40 #include "segment.h"
57 SC_LSEG_SR = 1, /* Make a logical segment having a super root */
59 a logical segment without a super root */
60 SC_FLUSH_FILE, /* Flush data files, leads to segment writes without
83 #define NILFS_CF_SUFREED 0x0004 /* segment usages have been freed */
161 * the segment semaphore, to make a segment construction and write tasks
220 * nilfs_transaction_commit() sets a timer to start the segment
357 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
386 return -E2BIG; /* The current segment is filled up nilfs_segctor_feed_segment()
410 * Functions for making segment summary and payloads
978 /* Remaining number of blocks within segment buffer */ nilfs_segctor_buffer_rest()
1185 /* End of a logical segment */
1213 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1240 /* Start from the head of a new full segment */ nilfs_segctor_begin_construction()
1288 * Since the segment specified with nextnum might be allocated during nilfs_segctor_extend_segments()
1298 /* extend segment info */ nilfs_segctor_extend_segments()
1304 /* map this buffer to region of segment on-disk */ nilfs_segctor_extend_segments()
1308 /* allocate the next next full segment */ nilfs_segctor_extend_segments()
1346 /* Case 1: The first segment failed */ nilfs_free_incomplete_logs()
1348 /* Case 1a: Partial segment appended into an existing nilfs_free_incomplete_logs()
1349 segment */ nilfs_free_incomplete_logs()
1352 else /* Case 1b: New full segment */ nilfs_free_incomplete_logs()
1364 /* Case 2: extended segment (!= next) failed */ list_for_each_entry_continue()
1444 /* The current segment is filled up */ nilfs_segctor_collect()
1649 * or more because they might be split in a segment. nilfs_end_page_io()
1820 * Since pages may continue over multiple segment buffers, nilfs_segctor_complete_write()
1942 * Main procedure of segment constructor
1976 /* Avoid empty segment */ nilfs_segctor_do_construct()
2045 * This function MUST be called within a section locking the segment
2073 * nilfs_flush_segment - trigger a segment construction for resource control
2151 * nilfs_construct_segment - construct a logical segment
2185 * nilfs_construct_dsync_segment - construct a data-only logical segment
2253 * @sci: segment constructor object
2265 * @sci: segment constructor object
2295 * @sci: segment constructor object
2389 "segment construction failed. (err=%d)", err); nilfs_clean_segments()
2420 * Unclosed segment should be retried. We do this using sc_timer. nilfs_segctor_thread_construct()
2422 * to close the current logical segment. nilfs_segctor_thread_construct()
2464 * nilfs_segctor_thread - main loop of the segment constructor thread.
2468 * to execute segment constructions.
2641 * nilfs_segctor_destroy - destroy the segment constructor.
2646 * Caller must hold the segment semaphore.
H A Dsegbuf.c2 * segbuf.c - NILFS segment buffer
87 * @segbuf: new segment buffer
88 * @prev: segment buffer containing a log to be continued
155 * Setup segment summary
272 * Iterators for segment buffers
323 * @logs: list of segment buffers storing target logs
513 * @segbuf: segment buffer
532 printk(KERN_ERR "NILFS: IO error writing segment\n"); nilfs_segbuf_wait()
H A Dsysfs.c342 "\tshow checkpoint number of the latest segment.\n\n"
417 printk(KERN_ERR "NILFS: unable to get segment stat: err=%d\n", nilfs_segments_dirty_segments_show()
429 "(2) blocks_per_segment\n\tshow number of blocks in segment.\n\n"
652 "\tshow start block number of the latest segment.\n\n"
654 "\tshow sequence value of the latest segment.\n\n"
656 "\tshow checkpoint number of the latest segment.\n\n"
657 "(4) current_seg_sequence\n\tshow segment sequence counter.\n\n"
659 "\tshow index number of the latest full segment.\n\n"
661 "\tshow index number of the full segment index to be used next.\n\n"
663 "\tshow offset of next partial segment in the current full segment.\n\n"
666 "\tshow write time of the last segment in human-readable format.\n\n"
668 "\tshow write time of the last segment in seconds.\n\n"
670 "\tshow write time of the last segment not for cleaner operation "
673 "\tshow write time of the last segment not for cleaner operation "
/linux-4.1.27/arch/m68k/sun3/prom/
H A DMakefile6 #bootstr.o init.o misc.o segment.o console.o printf.o
/linux-4.1.27/arch/x86/purgatory/
H A Dsetup-x86_64.S19 /* Load a gdt so I know what the segment registers are */
23 movl $0x18, %eax /* data segment */
39 gdt: /* 0x00 unusable segment
47 /* 0x10 4GB flat code segment */
50 /* 0x18 4GB flat data segment */
H A Dentry64.S24 movl $0x18, %eax /* data segment */
86 /* 0x00 unusable segment
94 /* 0x10 4GB flat code segment */
97 /* 0x18 4GB flat data segment */
/linux-4.1.27/fs/romfs/
H A Dstorage.c44 size_t segment; romfs_mtd_strnlen() local
51 segment = min_t(size_t, maxlen, 16); romfs_mtd_strnlen()
52 ret = ROMFS_MTD_READ(sb, pos, segment, &len, buf); romfs_mtd_strnlen()
74 size_t len, segment; romfs_mtd_strcmp() local
82 segment = min_t(size_t, size + 1, 17); romfs_mtd_strcmp()
83 ret = ROMFS_MTD_READ(sb, pos, segment, &len, buf); romfs_mtd_strcmp()
112 size_t segment; romfs_blk_read() local
117 segment = min_t(size_t, buflen, ROMBSIZE - offset); romfs_blk_read()
121 memcpy(buf, bh->b_data + offset, segment); romfs_blk_read()
123 buf += segment; romfs_blk_read()
124 buflen -= segment; romfs_blk_read()
125 pos += segment; romfs_blk_read()
140 size_t segment; romfs_blk_strnlen() local
146 segment = min_t(size_t, limit, ROMBSIZE - offset); romfs_blk_strnlen()
151 p = memchr(buf, 0, segment); romfs_blk_strnlen()
155 limit -= segment; romfs_blk_strnlen()
156 pos += segment; romfs_blk_strnlen()
157 n += segment; romfs_blk_strnlen()
172 size_t segment; romfs_blk_strcmp() local
178 segment = min_t(size_t, size, ROMBSIZE - offset); romfs_blk_strcmp()
182 matched = (memcmp(bh->b_data + offset, str, segment) == 0); romfs_blk_strcmp()
184 size -= segment; romfs_blk_strcmp()
185 pos += segment; romfs_blk_strcmp()
186 str += segment; romfs_blk_strcmp()
187 if (matched && size == 0 && offset + segment < ROMBSIZE) { romfs_blk_strcmp()
188 if (!bh->b_data[offset + segment]) romfs_blk_strcmp()
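The romfs storage.c hits above all use the same block-bounded copy pattern: each iteration handles one segment clamped to the distance to the next block boundary, then advances buf, buflen, and pos. A minimal standalone sketch of that loop follows; BLKSIZE and blk_data() are illustrative stand-ins for ROMBSIZE and the buffer_head lookup.

#include <stdio.h>
#include <string.h>

#define BLKSIZE 16

static const char disk[64] =
	"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde";

static const char *blk_data(size_t blkno)
{
	return disk + blkno * BLKSIZE;	/* pretend block lookup */
}

static void blk_read(size_t pos, char *buf, size_t buflen)
{
	while (buflen) {
		size_t offset = pos % BLKSIZE;
		size_t segment = BLKSIZE - offset;

		if (segment > buflen)
			segment = buflen;	/* min(buflen, block remainder) */
		memcpy(buf, blk_data(pos / BLKSIZE) + offset, segment);
		buf += segment;
		buflen -= segment;
		pos += segment;
	}
}

int main(void)
{
	char out[20] = { 0 };

	blk_read(10, out, 19);		/* spans a block boundary */
	printf("%s\n", out);
	return 0;
}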
/linux-4.1.27/arch/ia64/include/uapi/asm/
H A Dmman.h13 #define MAP_GROWSUP 0x0200 /* register stack-like segment */
H A Dshmbuf.h15 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/fs/f2fs/
H A DMakefile4 f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o
H A Dsegment.h2 * fs/f2fs/segment.h
19 /* L: Logical segment # in volume, R: Relative segment # in main area */
145 /* for a function parameter to select a victim segment */
149 unsigned long *dirty_segmap; /* dirty segment bitmap */
154 unsigned int min_segno; /* segment # having min. cost */
166 unsigned char type; /* segment type like CURSEG_XXX_TYPE */
167 unsigned long long mtime; /* modification time of the segment */
197 struct seg_entry *sentries; /* SIT segment-level cache */
208 unsigned int start_segno; /* start segment number logically */
212 unsigned long *free_segmap; /* free segment bitmap */
232 struct mutex seglist_lock; /* lock for segment bitmaps */
248 unsigned int segno; /* current segment number */
251 unsigned int next_segno; /* preallocated segment */
578 /* check segment usage */ check_block_count()
581 /* check boundary of a given segment number */ check_block_count()
619 /* check segment usage */ check_block_count()
623 /* check boundary of a given segment number */ check_block_count()
730 * When writing pages, it'd better align nr_to_write for segment size.
/linux-4.1.27/arch/s390/include/asm/
H A Dextmem.h2 * definitions for external memory segment support
10 /* possible values for segment type as returned by segment_info */
H A Duser.h44 * doesn't use the extra segment registers)
55 unsigned long int u_tsize; /* Text segment size (pages). */
56 unsigned long int u_dsize; /* Data segment size (pages). */
57 unsigned long int u_ssize; /* Stack segment size (pages). */
H A Dpgtable.h80 * for S390 segment-table entries are combined to one PGD
237 /* Bits in the segment/region table address-space-control-element */
238 #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
247 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
251 #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
254 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
270 /* Bits in the segment table entry */
274 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
276 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
281 #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
282 #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
285 #define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
286 #define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
303 * The segment table origin is used to distinguish empty (origin==0) from
304 * read-write, old segment table entries (origin!=0)
439 * entry or a segment table entry. Check for the bit that are pgd_bad()
473 * entry or a segment table entry. Check for the bit that are pud_bad()
697 * @host_to_guest: radix tree with pointer to segment table entries
1293 * Convert to segment table entry format. massage_pgprot_pmd()
/linux-4.1.27/arch/x86/math-emu/
H A Dget_address.c121 static unsigned long vm86_segment(u_char segment, struct address *addr) vm86_segment() argument
123 segment--; vm86_segment()
125 if (segment > PREFIX_SS_) { vm86_segment()
130 addr->selector = VM86_REG_(segment); vm86_segment()
131 return (unsigned long)VM86_REG_(segment) << 4; vm86_segment()
135 static long pm_address(u_char FPU_modrm, u_char segment, pm_address() argument
141 segment--; pm_address()
144 /* segment is unsigned, so this also detects if segment was 0: */ pm_address()
145 if (segment > PREFIX_SS_) { pm_address()
151 switch (segment) { pm_address()
157 addr->selector = PM_REG_(segment); pm_address()
218 && (addr_modes.override.segment == PREFIX_CS_)) { FPU_get_address()
278 address += vm86_segment(addr_modes.override.segment, addr); FPU_get_address()
282 address = pm_address(FPU_modrm, addr_modes.override.segment, FPU_get_address()
302 && (addr_modes.override.segment == PREFIX_CS_)) { FPU_get_address_16()
353 if (addr_modes.override.segment == PREFIX_DEFAULT) FPU_get_address_16()
354 addr_modes.override.segment = PREFIX_SS_; FPU_get_address_16()
358 if (addr_modes.override.segment == PREFIX_DEFAULT) FPU_get_address_16()
359 addr_modes.override.segment = PREFIX_SS_; FPU_get_address_16()
369 if (addr_modes.override.segment == PREFIX_DEFAULT) FPU_get_address_16()
370 addr_modes.override.segment = PREFIX_SS_; FPU_get_address_16()
386 address += vm86_segment(addr_modes.override.segment, addr); FPU_get_address_16()
390 address = pm_address(FPU_modrm, addr_modes.override.segment, FPU_get_address_16()
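The math-emu hits above show vm86_segment() forming a real-mode style base by shifting the segment register value left by 4; the linear address is then base plus the 16-bit offset. A trivial standalone sketch of that address formation:

#include <stdio.h>
#include <stdint.h>

static uint32_t vm86_linear(uint16_t selector, uint16_t offset)
{
	return ((uint32_t)selector << 4) + offset;	/* base = selector * 16 */
}

int main(void)
{
	/* e.g. B800:001E -> 0xb801e, inside the text-mode VRAM area */
	printf("%#x\n", vm86_linear(0xB800, 0x001E));
	return 0;
}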
/linux-4.1.27/drivers/gpu/ipu-v3/
H A Dipu-dmfc.c108 unsigned segment; member in struct:dmfc_channel
176 int segment, int burstsize) ipu_dmfc_setup_channel()
182 "dmfc: using %d slots starting from segment %d for IPU channel %d\n", ipu_dmfc_setup_channel()
183 slots, segment, dmfc->data->ipu_channel); ipu_dmfc_setup_channel()
217 field |= DMFC_SEGMENT(segment); ipu_dmfc_setup_channel()
227 dmfc->segment = segment; ipu_dmfc_setup_channel()
229 dmfc->slotmask = ((1 << slots) - 1) << segment; ipu_dmfc_setup_channel()
248 int i, segment = 0; dmfc_find_slots() local
257 return segment; dmfc_find_slots()
260 segment++; dmfc_find_slots()
271 dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n", ipu_dmfc_free_bandwidth()
272 dmfc->slots, dmfc->segment); ipu_dmfc_free_bandwidth()
281 dmfc->segment = 0; ipu_dmfc_free_bandwidth()
288 priv->channels[i].segment = ipu_dmfc_free_bandwidth()
292 priv->channels[i].segment; ipu_dmfc_free_bandwidth()
300 priv->channels[i].segment, ipu_dmfc_free_bandwidth()
313 int segment = -1, ret = 0; ipu_dmfc_alloc_bandwidth() local
330 segment = dmfc_find_slots(priv, slots * 2); ipu_dmfc_alloc_bandwidth()
335 if (segment >= 0) ipu_dmfc_alloc_bandwidth()
338 segment = dmfc_find_slots(priv, slots); ipu_dmfc_alloc_bandwidth()
339 if (segment < 0) { ipu_dmfc_alloc_bandwidth()
344 ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize); ipu_dmfc_alloc_bandwidth()
175 ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots, int segment, int burstsize) ipu_dmfc_setup_channel() argument
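The ipu-dmfc.c hits above show a channel claiming a run of consecutive FIFO slots starting at a segment index, recorded as slotmask = ((1 << slots) - 1) << segment. The sketch below illustrates that bookkeeping with a simplified free-slot search; find_slots() is a hypothetical stand-in for dmfc_find_slots(), not the driver's code.

#include <stdio.h>

#define DMFC_SLOTS 8

static unsigned int used_mask;	/* one bit per slot */

/* Return the first index with "slots" consecutive free slots, or -1. */
static int find_slots(int slots)
{
	unsigned int want = (1u << slots) - 1;

	for (int seg = 0; seg + slots <= DMFC_SLOTS; seg++)
		if (!(used_mask & (want << seg)))
			return seg;
	return -1;
}

static int alloc_slots(int slots)
{
	int seg = find_slots(slots);

	if (seg >= 0)
		used_mask |= ((1u << slots) - 1) << seg;	/* claim them */
	return seg;
}

int main(void)
{
	printf("ch0 at segment %d\n", alloc_slots(2));	/* 0 */
	printf("ch1 at segment %d\n", alloc_slots(4));	/* 2 */
	printf("ch2 at segment %d\n", alloc_slots(4));	/* -1: no room */
	return 0;
}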
/linux-4.1.27/include/uapi/linux/
H A Dflat.h31 with text segment from beginning of file */
32 unsigned long data_start; /* Offset of data segment from beginning of
34 unsigned long data_end; /* Offset of end of data segment
36 unsigned long bss_end; /* Offset of end of bss segment from beginning
39 /* (It is assumed that data_end through bss_end forms the bss segment.) */
H A Dcoff.h147 #define COFF_DMAGIC 0410 /* dirty text segment, data aligned */
184 #define COFF_STYP_REG 0x00 /* regular segment */
185 #define COFF_STYP_DSECT 0x01 /* dummy segment */
186 #define COFF_STYP_NOLOAD 0x02 /* no-load segment */
187 #define COFF_STYP_GROUP 0x04 /* group segment */
188 #define COFF_STYP_PAD 0x08 /* .pad segment */
190 #define COFF_STYP_TEXT 0x20 /* .text segment */
191 #define COFF_STYP_DATA 0x40 /* .data segment */
192 #define COFF_STYP_BSS 0x80 /* .bss segment */
249 char e_value[4]; /* Value (address) of the segment */
H A Delf-fdpic.h19 /* segment mappings for ELF FDPIC libraries/executables/interpreters */
H A Da.out.h110 /* Address of text segment in memory after it is loaded. */
115 /* Address of data segment in memory after it is loaded.
157 /* Address of bss segment in memory after it is loaded. */
245 /* Address (within segment) to be relocated. */
259 0 => relocate with the address of a segment.
/linux-4.1.27/arch/x86/include/asm/
H A Dsegment.h7 * Constructor for a conventional segment GDT (or LDT) entry.
37 /* LDT segment has TI set ... */
58 * 6 - TLS segment #1 [ glibc's TLS segment ]
59 * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
60 * 8 - TLS segment #3 <=== cacheline #3
67 * 12 - kernel code segment <=== cacheline #4
68 * 13 - kernel data segment
126 /* segment for calling fn: */
128 /* code segment for BIOS: */
134 /* data segment for BIOS: */
136 /* transfer data segment: */
138 /* another data segment: */
162 * We cannot use the same code segment descriptor for user and kernel mode,
252 * Load a segment. Fall back on loading the zero
253 * segment if something goes wrong..
273 * Save a segment register away:
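The x86 segment.h hits above reference the constructor for a conventional GDT/LDT entry. As a reminder of the descriptor layout it packs (limit[15:0] in bits 0-15, base[23:0] in bits 16-39, access byte in bits 40-47, limit[19:16] in bits 48-51, flags in bits 52-55, base[31:24] in bits 56-63), here is an illustrative re-derivation in plain C; gdt_entry() is a sketch, not the kernel macro itself.

#include <stdio.h>
#include <stdint.h>

static uint64_t gdt_entry(uint32_t flags, uint32_t base, uint32_t limit)
{
	return ((uint64_t)(base  & 0xff000000) << (56 - 24)) |
	       ((uint64_t)(flags & 0x0000f0ff) << 40)        |
	       ((uint64_t)(limit & 0x000f0000) << (48 - 16)) |
	       ((uint64_t)(base  & 0x00ffffff) << 16)        |
	        (uint64_t)(limit & 0x0000ffff);
}

int main(void)
{
	/* 4 GB flat code segment: base 0, limit 0xfffff pages, flags 0xc09b
	 * -> 0x00cf9b000000ffff */
	printf("%#018llx\n",
	       (unsigned long long)gdt_entry(0xc09b, 0, 0xfffff));
	return 0;
}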
H A Dmmu.h9 * we put the segment information here.
H A Duser32.h51 __u32 u_tsize; /* Text segment size (pages). */
52 __u32 u_dsize; /* Data segment size (pages). */
53 __u32 u_ssize; /* Stack segment size (pages). */
H A Dvm86.h10 * mode - the main change is that the old segment descriptors aren't
12 * hardware when a trap occurs), and the real segment descriptors are
19 * normal regs, with special meaning for the segment descriptors..
H A Ddesc_defs.h21 /* 8 byte segment descriptor */
53 u16 segment; member in struct:gate_struct64
85 #define gate_segment(g) ((g).segment)
H A Dstackprotector.h9 * and x86_32 use segment registers differently and thus handles this
21 * slower at loading segment registers with different value when
45 * 24 byte read-only segment initializer for stack canary. Linker
H A Duser_32.h75 * doesn't use the extra segment registers)
109 unsigned long int u_tsize; /* Text segment size (pages). */
110 unsigned long int u_dsize; /* Data segment size (pages). */
111 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/arch/x86/realmode/rm/
H A Drealmode.h10 * relocatable symbol for the segment portion.
H A Dreboot.S2 #include <asm/segment.h>
49 * Set up a GDT from which we can load segment descriptors for real
56 * Load the data segment registers with 16-bit compatible values
142 * 16-bit code segment pointing to real_mode_seg
150 * 16-bit data segment with the selector value 16 = 0x10 and
H A Dwakeup_asm.S5 #include <asm/segment.h>
47 junk in the data segment descriptor registers. The only way
159 .word 0xffff /* 16-bit code segment @ real_mode_base */
163 .word 0xffff /* 16-bit data segment @ real_mode_base */
H A Dtrampoline_32.S18 * and IP is zero. Thus, we load CS to the physical segment
23 #include <asm/segment.h>
/linux-4.1.27/arch/xtensa/include/asm/
H A Dsegment.h2 * include/asm-xtensa/segment.h
H A Ddma.h37 * the size of the statically mapped kernel segment
H A Dio.h39 * Note that we currently don't support any address outside the KIO segment.
/linux-4.1.27/arch/ia64/sn/pci/pcibr/
H A Dpcibr_provider.c31 u64 segment; sal_pcibr_slot_enable() local
36 segment = soft->pbi_buscommon.bs_persist_segment; sal_pcibr_slot_enable()
38 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment, sal_pcibr_slot_enable()
51 u64 segment; sal_pcibr_slot_disable() local
56 segment = soft->pbi_buscommon.bs_persist_segment; sal_pcibr_slot_disable()
59 segment, busnum, (u64) device, (u64) action, sal_pcibr_slot_disable()
69 int segment; sal_pcibr_error_interrupt() local
73 segment = soft->pbi_buscommon.bs_persist_segment; sal_pcibr_error_interrupt()
77 (u64) segment, (u64) busnum, 0, 0, 0, 0, 0); sal_pcibr_error_interrupt()
/linux-4.1.27/arch/mn10300/unit-asb2303/include/unit/
H A Dleds.h23 * use the 7-segment LEDs to indicate states
26 /* flip the 7-segment LEDs between "G" and "-" */
/linux-4.1.27/arch/avr32/include/asm/
H A Daddrspace.h23 /* Returns the privileged segment base of a given address */
30 * Map an address to a certain privileged segment
/linux-4.1.27/arch/frv/include/asm/
H A Dsegment.h0 /* segment.h: MMU segment settings
H A Duser.h61 unsigned long u_tsize; /* Text segment size (pages). */
62 unsigned long u_dsize; /* Data segment size (pages). */
63 unsigned long u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/include/linux/
H A Dnilfs2_fs.h92 * @sr_nongc_ctime: write time of the last segment not for cleaner operation
95 * @sr_sufile: segment usage file inode
163 /*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */
190 __le16 s_segment_usage_size; /* Size of a segment usage */
195 /*F8*/ __le32 s_c_interval; /* Commit interval of segment */
197 the segment construction */
241 #define NILFS_SUFILE_INO 5 /* segment usage file */
251 a full segment */
253 a partial segment */
400 * struct nilfs_segment_summary - segment summary header
402 * @ss_sumsum: checksum of segment summary
408 * @ss_next: next segment
411 * @ss_sumbytes: total size of segment summary in bytes
432 #define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */
437 #define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */
438 #define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */
441 #define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
613 * struct nilfs_segment_usage - segment usage
615 * @su_nblocks: number of blocks in segment
626 /* segment usage flag */
675 * struct nilfs_sufile_header - segment usage file header
678 * @sh_last_alloc: last allocated segment number
693 * nilfs_suinfo - segment usage information
695 * @sui_nblocks: number of written blocks in segment
696 * @sui_flags: segment usage flags
722 * nilfs_suinfo_update - segment usage information update
723 * @sup_segnum: segment number
726 * @sup_sui: segment usage information
819 * struct nilfs_sustat - segment usage statistics
823 * @ss_ctime: creation time of the last segment
824 * @ss_nongc_ctime: creation time of the last segment not for GC
H A Dshm.h28 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
29 #define SHM_LOCKED 02000 /* segment will not be swapped */
30 #define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
H A Ddmar.h59 u16 segment; /* PCI domain */ member in struct:dmar_drhd_unit
116 struct dmar_dev_scope **devices, u16 segment);
120 void *start, void*end, u16 segment,
124 u16 segment, struct dmar_dev_scope *devices,
H A Df2fs_fs.h63 __le32 log_blocks_per_seg; /* log2 # of blocks per segment */
75 __le32 segment0_blkaddr; /* start block address of segment 0 */
127 /* allocation type of current segment */
271 * Each segment is 2MB in size by default so that a bitmap for validity of
294 __le64 mtime; /* segment age for cleaning */
302 * For segment summary
305 * exactly 2MB segment by default. Not allow to change the basic units.
321 /* a summary entry for a 4KB-sized block in a segment */
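The f2fs_fs.h comments above put a segment at 2 MB by default; assuming the usual 4 KB block size, that is 512 blocks per segment, i.e. log_blocks_per_seg = 9. A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int seg_bytes = 2 << 20;	/* 2 MB segment */
	unsigned int blk_bytes = 4 << 10;	/* 4 KB block  */
	unsigned int blocks = seg_bytes / blk_bytes;
	unsigned int log2 = 0;

	while ((1u << log2) < blocks)
		log2++;
	printf("%u blocks per segment, log2 = %u\n", blocks, log2);
	return 0;
}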
H A Di2c-mux.h31 * Called to create an i2c bus on a multiplexed bus segment.
/linux-4.1.27/arch/s390/mm/
H A Dextmem.c145 * Create the 8 bytes, ebcdic VM segment name from
189 * Perform a function on a dcss segment.
236 /* do a diag to get info about a segment.
305 /* multi-part segment. only one type supported here: query_segment_type()
341 * get info about a segment
345 * -ENOENT : no such segment
346 * -EOPNOTSUPP: multi-part segment cannot be used with linux
348 * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
367 * check if segment collides with other segments that are currently loaded
391 * real segment loading function, called from segment_load
495 * this function loads a DCSS segment
499 * addr : will be filled with start address of the segment
500 * end : will be filled with end address of the segment
504 * -ENOENT : no such segment
505 * -EOPNOTSUPP: multi-part segment cannot be used with linux
506 * -ENOSPC : segment cannot be used (overlaps with storage)
507 * -EBUSY : segment can temporarily not be used (overlaps with dcss)
508 * -ERANGE : segment cannot be used (exceeds kernel mapping range)
509 * -EPERM : segment is currently loaded with incompatible permissions
511 * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
543 * this function modifies the shared state of a DCSS segment. note that
548 * -EIO : could not perform load diagnose (segment gone!)
549 * -ENOENT : no such segment (segment gone!)
550 * -EAGAIN : segment is in use by other exploiters, try later
551 * -EINVAL : no segment with the given name is currently loaded - name invalid
552 * -EBUSY : segment can temporarily not be used (overlaps with dcss)
633 * Decrease the use count of a DCSS segment and remove
665 * save segment content permanently
H A Dvmem.c269 * Add memory segment to the segment list if it doesn't overlap with
270 * an already present segment.
292 * Remove memory segment from the segment list.
395 * Convert memblock.memory to a memory segment list so there is a single
/linux-4.1.27/drivers/net/ethernet/qlogic/qlge/
H A Dqlge.h1625 /* Firmware coredump header segment numbers. */
1746 /* segment 0 */
1749 /* segment 16 */
1753 /* segment 30 */
1757 /* segment 31 */
1762 /* segment 32 */
1769 /* segment 33 */
1773 /* segment 34 */
1779 /* segment 0 */
1782 /* segment 1 */
1787 /* segment 2 */
1791 /* segment 3 */
1795 /* segment 4 */
1799 /* segment 5 */
1803 /* segment 6 */
1807 /* segment 7 */
1811 /* segment 8 */
1815 /* segment 9 */
1819 /* segment 10 */
1823 /* segment 11 */
1827 /* segment 12 */
1830 /* segment 13 */
1834 /* segment 14 */
1838 /* segment 15 */
1842 /* segment 16 */
1846 /* segment 17 */
1850 /* segment 18 */
1854 /* segment 19 */
1858 /* segment 20 */
1862 /* segment 21 */
1866 /* segment 22 */
1870 /* segment 23 */
1874 /* segment 24 */
1878 /* segment 25 */
1882 /* segment 26 */
1886 /* segment 27 */
1890 /* segment 28 */
1894 /* segment 29 */
1898 /* segment 30 */
1902 /* segment 31 */
1907 /* segment 32 */
1914 /* segment 33 */
1917 /* segment 34 */
1921 /* segment 35 */
1925 /* segment 36 */
1929 /* segment 37 */
1933 /* segment 38 */
1937 /* segment 39 */
1941 /* segment 40 */
1945 /* segment 41 */
1949 /* segment 42 */
1953 /* segment 43 */
1957 /* segment 44 */
1961 /* segment 45 */
1965 /* segment 50 */
/linux-4.1.27/drivers/staging/rts5208/
H A Dms.c2408 ms_card->segment = vzalloc(size); ms_init_l2p_tbl()
2409 if (ms_card->segment == NULL) { ms_init_l2p_tbl()
2442 block_no = ms_card->segment[seg_no].disable_count++; ms_init_l2p_tbl()
2443 ms_card->segment[seg_no].defect_list[block_no] = defect_block; ms_init_l2p_tbl()
2447 ms_card->segment[i].build_flag = 0; ms_init_l2p_tbl()
2448 ms_card->segment[i].l2p_table = NULL; ms_init_l2p_tbl()
2449 ms_card->segment[i].free_table = NULL; ms_init_l2p_tbl()
2450 ms_card->segment[i].get_index = 0; ms_init_l2p_tbl()
2451 ms_card->segment[i].set_index = 0; ms_init_l2p_tbl()
2452 ms_card->segment[i].unused_blk_cnt = 0; ms_init_l2p_tbl()
2454 dev_dbg(rtsx_dev(chip), "defective block count of segment %d is %d\n", ms_init_l2p_tbl()
2455 i, ms_card->segment[i].disable_count); ms_init_l2p_tbl()
2461 if (ms_card->segment) { ms_init_l2p_tbl()
2462 vfree(ms_card->segment); ms_init_l2p_tbl()
2463 ms_card->segment = NULL; ms_init_l2p_tbl()
2472 struct zone_entry *segment; ms_get_l2p_tbl() local
2474 if (ms_card->segment == NULL) ms_get_l2p_tbl()
2477 segment = &(ms_card->segment[seg_no]); ms_get_l2p_tbl()
2479 if (segment->l2p_table) ms_get_l2p_tbl()
2480 return segment->l2p_table[log_off]; ms_get_l2p_tbl()
2489 struct zone_entry *segment; ms_set_l2p_tbl() local
2491 if (ms_card->segment == NULL) ms_set_l2p_tbl()
2494 segment = &(ms_card->segment[seg_no]); ms_set_l2p_tbl()
2495 if (segment->l2p_table) ms_set_l2p_tbl()
2496 segment->l2p_table[log_off] = phy_blk; ms_set_l2p_tbl()
2502 struct zone_entry *segment; ms_set_unused_block() local
2506 segment = &(ms_card->segment[seg_no]); ms_set_unused_block()
2508 segment->free_table[segment->set_index++] = phy_blk; ms_set_unused_block()
2509 if (segment->set_index >= MS_FREE_TABLE_CNT) ms_set_unused_block()
2510 segment->set_index = 0; ms_set_unused_block()
2512 segment->unused_blk_cnt++; ms_set_unused_block()
2518 struct zone_entry *segment; ms_get_unused_block() local
2521 segment = &(ms_card->segment[seg_no]); ms_get_unused_block()
2523 if (segment->unused_blk_cnt <= 0) ms_get_unused_block()
2526 phy_blk = segment->free_table[segment->get_index]; ms_get_unused_block()
2527 segment->free_table[segment->get_index++] = 0xFFFF; ms_get_unused_block()
2528 if (segment->get_index >= MS_FREE_TABLE_CNT) ms_get_unused_block()
2529 segment->get_index = 0; ms_get_unused_block()
2531 segment->unused_blk_cnt--; ms_get_unused_block()
2545 struct zone_entry *segment; ms_arbitrate_l2p() local
2550 segment = &(ms_card->segment[seg_no]); ms_arbitrate_l2p()
2551 tmp_blk = segment->l2p_table[log_off]; ms_arbitrate_l2p()
2559 segment->l2p_table[log_off] = phy_blk; ms_arbitrate_l2p()
2577 segment->l2p_table[log_off] = phy_blk; ms_arbitrate_l2p()
2587 struct zone_entry *segment; ms_build_l2p_tbl() local
2595 if (ms_card->segment == NULL) { ms_build_l2p_tbl()
2603 if (ms_card->segment[seg_no].build_flag) { ms_build_l2p_tbl()
2604 dev_dbg(rtsx_dev(chip), "l2p table of segment %d has been built\n", ms_build_l2p_tbl()
2614 segment = &(ms_card->segment[seg_no]); ms_build_l2p_tbl()
2616 if (segment->l2p_table == NULL) { ms_build_l2p_tbl()
2617 segment->l2p_table = vmalloc(table_size * 2); ms_build_l2p_tbl()
2618 if (segment->l2p_table == NULL) { ms_build_l2p_tbl()
2623 memset((u8 *)(segment->l2p_table), 0xff, table_size * 2); ms_build_l2p_tbl()
2625 if (segment->free_table == NULL) { ms_build_l2p_tbl()
2626 segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2); ms_build_l2p_tbl()
2627 if (segment->free_table == NULL) { ms_build_l2p_tbl()
2632 memset((u8 *)(segment->free_table), 0xff, MS_FREE_TABLE_CNT * 2); ms_build_l2p_tbl()
2637 disable_cnt = segment->disable_count; ms_build_l2p_tbl()
2639 segment->get_index = segment->set_index = 0; ms_build_l2p_tbl()
2640 segment->unused_blk_cnt = 0; ms_build_l2p_tbl()
2645 for (i = 0; i < segment->disable_count; i++) { ms_build_l2p_tbl()
2646 if (phy_blk == segment->defect_list[i]) { ms_build_l2p_tbl()
2709 if (segment->l2p_table[idx] == 0xFFFF) { ms_build_l2p_tbl()
2710 segment->l2p_table[idx] = phy_blk; ms_build_l2p_tbl()
2715 tmp_blk = segment->l2p_table[idx]; ms_build_l2p_tbl()
2727 segment->build_flag = 1; ms_build_l2p_tbl()
2730 segment->unused_blk_cnt); ms_build_l2p_tbl()
2734 if (segment->unused_blk_cnt < 2) ms_build_l2p_tbl()
2737 if (segment->unused_blk_cnt < 1) ms_build_l2p_tbl()
2747 if (segment->l2p_table[idx] == 0xFFFF) { ms_build_l2p_tbl()
2759 segment->l2p_table[idx] = phy_blk; ms_build_l2p_tbl()
2761 if (segment->unused_blk_cnt < 2) { ms_build_l2p_tbl()
2766 if (segment->unused_blk_cnt < 1) { ms_build_l2p_tbl()
2777 tmp_blk = segment->l2p_table[log_blk]; ms_build_l2p_tbl()
2792 segment->l2p_table[log_blk] = phy_blk; ms_build_l2p_tbl()
2806 segment->build_flag = 0; ms_build_l2p_tbl()
2807 if (segment->l2p_table) { ms_build_l2p_tbl()
2808 vfree(segment->l2p_table); ms_build_l2p_tbl()
2809 segment->l2p_table = NULL; ms_build_l2p_tbl()
2811 if (segment->free_table) { ms_build_l2p_tbl()
2812 vfree(segment->free_table); ms_build_l2p_tbl()
2813 segment->free_table = NULL; ms_build_l2p_tbl()
2862 /* Build table for the last segment, reset_ms_card()
3850 if (ms_card->segment[seg_no].build_flag == 0) { ms_rw_multi_sector()
4010 if (ms_card->segment[seg_no].build_flag == 0) { ms_rw_multi_sector()
4098 if (ms_card->segment != NULL) { ms_free_l2p_tbl()
4100 if (ms_card->segment[i].l2p_table != NULL) { ms_free_l2p_tbl()
4101 vfree(ms_card->segment[i].l2p_table); ms_free_l2p_tbl()
4102 ms_card->segment[i].l2p_table = NULL; ms_free_l2p_tbl()
4104 if (ms_card->segment[i].free_table != NULL) { ms_free_l2p_tbl()
4105 vfree(ms_card->segment[i].free_table); ms_free_l2p_tbl()
4106 ms_card->segment[i].free_table = NULL; ms_free_l2p_tbl()
4109 vfree(ms_card->segment); ms_free_l2p_tbl()
4110 ms_card->segment = NULL; ms_free_l2p_tbl()
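The rts5208/ms.c hits above show each zone keeping a small ring buffer of spare physical blocks: ms_set_unused_block() pushes at set_index, ms_get_unused_block() pops at get_index, both wrapping at the table size, with unused_blk_cnt tracking occupancy. A standalone sketch of that ring; FREE_TABLE_CNT and struct zone here are illustrative, not the driver's definitions.

#include <stdio.h>
#include <stdint.h>

#define FREE_TABLE_CNT 4
#define NO_BLOCK 0xFFFF

struct zone {
	uint16_t free_table[FREE_TABLE_CNT];
	int set_index, get_index, unused_blk_cnt;
};

static void set_unused_block(struct zone *z, uint16_t phy_blk)
{
	z->free_table[z->set_index++] = phy_blk;
	if (z->set_index >= FREE_TABLE_CNT)
		z->set_index = 0;		/* wrap */
	z->unused_blk_cnt++;
}

static uint16_t get_unused_block(struct zone *z)
{
	uint16_t phy_blk;

	if (z->unused_blk_cnt <= 0)
		return NO_BLOCK;
	phy_blk = z->free_table[z->get_index];
	z->free_table[z->get_index++] = NO_BLOCK;
	if (z->get_index >= FREE_TABLE_CNT)
		z->get_index = 0;		/* wrap */
	z->unused_blk_cnt--;
	return phy_blk;
}

int main(void)
{
	struct zone z = { { NO_BLOCK, NO_BLOCK, NO_BLOCK, NO_BLOCK }, 0, 0, 0 };

	set_unused_block(&z, 0x0123);
	set_unused_block(&z, 0x0456);
	printf("%#06x\n", get_unused_block(&z));
	printf("%#06x\n", get_unused_block(&z));
	printf("%#06x\n", get_unused_block(&z));	/* empty -> 0xffff */
	return 0;
}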
/linux-4.1.27/include/scsi/
H A Dlibiscsi_tcp.h58 struct iscsi_segment segment; member in struct:iscsi_tcp_recv
104 /* segment helpers */
107 struct iscsi_segment *segment, int recv,
109 extern void iscsi_tcp_segment_unmap(struct iscsi_segment *segment);
111 extern void iscsi_segment_init_linear(struct iscsi_segment *segment,
116 iscsi_segment_seek_sg(struct iscsi_segment *segment,
/linux-4.1.27/arch/powerpc/kernel/
H A Dvmlinux.lds.S23 into the "notes" segment (at a non-zero load address).
26 segment and the kernel segment, so the dummy segment will get a
28 "notes" segment, since if nothing gets assigned to it, its load
79 /* The dummy segment contents for the bug workaround mentioned above
H A Dmachine_kexec_64.c36 unsigned long begin, end; /* limits of segment */ default_machine_kexec_prepare()
51 if (image->segment[i].mem < __pa(_end)) default_machine_kexec_prepare()
69 begin = image->segment[i].mem; default_machine_kexec_prepare()
70 end = begin + image->segment[i].memsz; default_machine_kexec_prepare()
88 begin = image->segment[i].mem; default_machine_kexec_prepare()
89 end = begin + image->segment[i].memsz; default_machine_kexec_prepare()
138 memcpy(ranges, image->segment, sizeof(ranges)); kexec_copy_flush()
/linux-4.1.27/arch/x86/vdso/
H A Dvdso-layout.lds.S5 * its virtual address, and with only one read-only segment.
25 * segment.
110 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/s390/kernel/
H A Dcompat_ptrace.h41 u32 u_tsize; /* Text segment size (pages). */
42 u32 u_dsize; /* Data segment size (pages). */
43 u32 u_ssize; /* Stack segment size (pages). */
H A Dhead64.S48 .quad 0 # cr1: primary space segment table
54 .quad 0 # cr7: secondary space segment table
60 .quad 0 # cr13: home space segment table
/linux-4.1.27/arch/mn10300/include/asm/
H A Duser.h30 unsigned long int u_tsize; /* Text segment size (pages). */
31 unsigned long int u_dsize; /* Data segment size (pages). */
32 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/arch/ia64/kernel/
H A Dgate.lds.S3 * prelinked to its virtual address, with only one read-only segment and
4 * one execute-only segment (both fit in one page). This script controls
79 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/include/uapi/asm-generic/
H A Dmman.h6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */
H A Dshmbuf.h27 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/ia64/sn/kernel/
H A Dio_init.c27 static int max_segment_number; /* Default highest segment number */
49 static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) sal_get_pcibus_info() argument
57 (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0); sal_get_pcibus_info()
65 sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, sal_get_pcidev_info() argument
74 (u64) segment, (u64) bus_number, (u64) devfn, sal_get_pcidev_info()
234 sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) sn_pci_controller_fixup() argument
242 status = sal_get_pcibus_info((u64) segment, (u64) busnum, sn_pci_controller_fixup()
250 controller->segment = segment; sn_pci_controller_fixup()
/linux-4.1.27/drivers/dma/xilinx/
H A Dxilinx_vdma.c163 * struct xilinx_vdma_tx_segment - Descriptor segment
166 * @phys: Physical address of segment
303 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
306 * Return: The allocated segment on success and NULL on failure.
311 struct xilinx_vdma_tx_segment *segment; xilinx_vdma_alloc_tx_segment() local
314 segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); xilinx_vdma_alloc_tx_segment()
315 if (!segment) xilinx_vdma_alloc_tx_segment()
318 memset(segment, 0, sizeof(*segment)); xilinx_vdma_alloc_tx_segment()
319 segment->phys = phys; xilinx_vdma_alloc_tx_segment()
321 return segment; xilinx_vdma_alloc_tx_segment()
325 * xilinx_vdma_free_tx_segment - Free transaction segment
327 * @segment: VDMA transaction segment
330 struct xilinx_vdma_tx_segment *segment) xilinx_vdma_free_tx_segment()
332 dma_pool_free(chan->desc_pool, segment, segment->phys); xilinx_vdma_free_tx_segment()
372 struct xilinx_vdma_tx_segment *segment, *next; xilinx_vdma_free_tx_descriptor() local
377 list_for_each_entry_safe(segment, next, &desc->segments, node) { xilinx_vdma_free_tx_descriptor()
378 list_del(&segment->node); xilinx_vdma_free_tx_descriptor()
379 xilinx_vdma_free_tx_segment(chan, segment); xilinx_vdma_free_tx_descriptor()
699 struct xilinx_vdma_tx_segment *segment, *last = NULL; xilinx_vdma_start_transfer() local
702 list_for_each_entry(segment, &desc->segments, node) { xilinx_vdma_start_transfer()
705 segment->hw.buf_addr); xilinx_vdma_start_transfer()
706 last = segment; xilinx_vdma_start_transfer()
936 struct xilinx_vdma_tx_segment *segment, *prev = NULL; xilinx_vdma_dma_prep_interleaved() local
958 segment = xilinx_vdma_alloc_tx_segment(chan); xilinx_vdma_dma_prep_interleaved()
959 if (!segment) xilinx_vdma_dma_prep_interleaved()
963 hw = &segment->hw; xilinx_vdma_dma_prep_interleaved()
980 prev->hw.next_desc = segment->phys; xilinx_vdma_dma_prep_interleaved()
983 /* Insert the segment into the descriptor segments list. */ xilinx_vdma_dma_prep_interleaved()
984 list_add_tail(&segment->node, &desc->segments); xilinx_vdma_dma_prep_interleaved()
986 prev = segment; xilinx_vdma_dma_prep_interleaved()
989 segment = list_first_entry(&desc->segments, xilinx_vdma_dma_prep_interleaved()
991 prev->hw.next_desc = segment->phys; xilinx_vdma_dma_prep_interleaved()
329 xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, struct xilinx_vdma_tx_segment *segment) xilinx_vdma_free_tx_segment() argument
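The interleaved-transfer path in xilinx_vdma_dma_prep_interleaved() above chains each newly allocated segment to its predecessor through the hardware next_desc field and finally points the last segment back at the first, forming a descriptor ring. A minimal user-space sketch of that chaining pattern; struct seg and the fake phys values are simplified stand-ins for the driver's dma_pool-backed xilinx_vdma_tx_segment, not the real API:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for xilinx_vdma_tx_segment: each descriptor records
 * the "physical" address of the next one so hardware can walk the ring. */
struct seg {
    unsigned long phys;      /* address the hardware would dereference */
    unsigned long next_desc; /* phys of the following segment */
};

int main(void)
{
    enum { NSEGS = 4 };
    struct seg *segs = calloc(NSEGS, sizeof(*segs));
    struct seg *prev = NULL;

    if (!segs)
        return 1;
    for (int i = 0; i < NSEGS; i++) {
        segs[i].phys = 0x1000 + i * 0x40;       /* invented phys addresses */
        if (prev)
            prev->next_desc = segs[i].phys;     /* chain to predecessor */
        prev = &segs[i];
    }
    /* Close the ring: the last segment points back at the first. */
    prev->next_desc = segs[0].phys;

    for (int i = 0; i < NSEGS; i++)
        printf("seg %d: phys=%#lx next=%#lx\n",
               i, segs[i].phys, segs[i].next_desc);
    free(segs);
    return 0;
}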
/linux-4.1.27/drivers/staging/rtl8712/
H A Drtl871x_ioctl.h54 int query_counter; /*count the number of query hits for this segment*/
55 int set_counter; /*count the number of set hits for this segment*/
/linux-4.1.27/arch/x86/um/vdso/
H A Dvdso-layout.lds.S3 * its virtual address, and with only one read-only segment.
56 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/x86/vdso/vdso32/
H A Dsyscall.S10 #include <asm/segment.h>
70 * Pad out the segment to match the size of the sysenter.S version.
H A Dint80.S51 * Pad out the segment to match the size of the sysenter.S version.
H A Dnote.S22 * do not like negative offsets in instructions using segment overrides,
/linux-4.1.27/arch/xtensa/include/uapi/asm/
H A Dshmbuf.h25 size_t shm_segsz; /* size of segment (bytes) */
41 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/sh/include/asm/
H A Daddrspace.h28 /* Returns the privileged segment base of a given address */
33 * Map an address to a certain privileged segment
/linux-4.1.27/arch/mn10300/kernel/
H A Dprofile-low.S16 #include <asm/segment.h>
47 # calculate relative position in text segment
/linux-4.1.27/arch/mn10300/unit-asb2305/include/unit/
H A Dleds.h29 * use the 7-segment LEDs to indicate states
36 /* flip the 7-segment LEDs between "Gdb-" and "----" */
/linux-4.1.27/arch/mn10300/unit-asb2364/include/unit/
H A Dleds.h24 * use the 7-segment LEDs to indicate states
28 /* flip the 7-segment LEDs between "Gdb-" and "----" */
/linux-4.1.27/arch/c6x/kernel/
H A Dvmlinux.lds.S28 * Start kernel read only segment
103 * Start kernel read-write segment.
/linux-4.1.27/arch/m32r/include/asm/
H A Daddrspace.h25 * Returns the kernel segment base of a given address
43 * Map an address to a certain kernel segment
H A Dprocessor.h121 /* Copy and release all segment info associated with a VM */
127 /* Copy and release all segment info associated with a VM */
/linux-4.1.27/net/core/
H A Dtso.c49 /* Move to next segment */ tso_build_data()
72 /* Move to next segment */ tso_start()
/linux-4.1.27/arch/x86/pci/
H A Dmmconfig-shared.c55 /* keep list sorted by segment and starting bus number */ list_add_sorted()
57 if (cfg->segment > new->segment || list_add_sorted()
58 (cfg->segment == new->segment && list_add_sorted()
67 static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start, pci_mmconfig_alloc() argument
81 new->segment = segment; pci_mmconfig_alloc()
90 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); pci_mmconfig_alloc()
96 static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, pci_mmconfig_add() argument
101 new = pci_mmconfig_alloc(segment, start, end, addr); pci_mmconfig_add()
110 segment, start, end, &new->res, (unsigned long)addr); pci_mmconfig_add()
116 struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus) pci_mmconfig_lookup() argument
121 if (cfg->segment == segment && pci_mmconfig_lookup()
479 cfg->segment, cfg->start_bus, cfg->end_bus); is_mmconf_reserved()
490 cfg->segment, cfg->start_bus, cfg->end_bus, is_mmconf_reserved()
649 if (cfg->segment) __pci_mmcfg_init()
745 cfg->segment, cfg->start_bus, cfg->end_bus); pci_mmconfig_insert()
804 if (cfg->segment == seg && cfg->start_bus == start && pci_mmconfig_delete()
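list_add_sorted() above keeps the MMCONFIG region list ordered by segment first and starting bus number second. A small stand-alone comparator illustrating that ordering; struct region and its fields are simplified stand-ins, not the kernel's struct pci_mmcfg_region:

#include <stdio.h>

struct region { int segment; int start_bus; };

/* Order regions by segment, then by starting bus number, mirroring the
 * comparison done in list_add_sorted(). Returns <0, 0 or >0 like memcmp(). */
static int region_cmp(const struct region *a, const struct region *b)
{
    if (a->segment != b->segment)
        return a->segment - b->segment;
    return a->start_bus - b->start_bus;
}

int main(void)
{
    struct region a = { .segment = 0, .start_bus = 0x40 };
    struct region b = { .segment = 1, .start_bus = 0x00 };

    printf("a sorts %s b\n", region_cmp(&a, &b) < 0 ? "before" : "after");
    return 0;
}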
/linux-4.1.27/arch/sh/kernel/
H A Dmachine_kexec.c59 printk(" segment[%d]: 0x%08x - 0x%08x (0x%08x)\n", kexec_info()
61 (unsigned int)image->segment[i].mem, kexec_info()
62 (unsigned int)image->segment[i].mem + kexec_info()
63 image->segment[i].memsz, kexec_info()
64 (unsigned int)image->segment[i].memsz); kexec_info()
/linux-4.1.27/drivers/media/i2c/
H A Dad9389b.c686 v4l2_dbg(1, debug, sd, "EDID segment 0 not found\n"); ad9389b_get_edid()
799 int segment, u8 *buf) ad9389b_dbg_dump_edid()
806 v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment); ad9389b_dbg_dump_edid()
854 ed.segment = ad9389b_rd(sd, 0xc4); ad9389b_edid_handler()
991 static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment) edid_verify_crc() argument
997 if (edid_block_verify_crc(&data[segment * 256])) { edid_verify_crc()
998 if ((segment + 1) * 2 <= blocks) edid_verify_crc()
999 return edid_block_verify_crc(&data[segment * 256 + 128]); edid_verify_crc()
1005 static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment) edid_verify_header() argument
1014 if (segment) edid_verify_header()
1028 int segment; ad9389b_check_edid_status() local
1037 segment = ad9389b_rd(sd, 0xc4); ad9389b_check_edid_status()
1038 if (segment >= EDID_MAX_SEGM) { ad9389b_check_edid_status()
1039 v4l2_err(sd, "edid segment number too big\n"); ad9389b_check_edid_status()
1042 v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment); ad9389b_check_edid_status()
1043 ad9389b_edid_rd(sd, 256, &state->edid.data[segment * 256]); ad9389b_check_edid_status()
1044 ad9389b_dbg_dump_edid(2, debug, sd, segment, ad9389b_check_edid_status()
1045 &state->edid.data[segment * 256]); ad9389b_check_edid_status()
1046 if (segment == 0) { ad9389b_check_edid_status()
1051 if (!edid_verify_crc(sd, segment) || ad9389b_check_edid_status()
1052 !edid_verify_header(sd, segment)) { ad9389b_check_edid_status()
1053 /* edid crc error, force reread of edid segment */ ad9389b_check_edid_status()
1059 /* one more segment read ok */ ad9389b_check_edid_status()
1060 state->edid.segments = segment + 1; ad9389b_check_edid_status()
1062 /* Request next EDID segment */ ad9389b_check_edid_status()
1063 v4l2_dbg(1, debug, sd, "%s: request segment %d\n", ad9389b_check_edid_status()
1073 /* report when we have all segments but report only for segment 0 */ ad9389b_check_edid_status()
1075 ed.segment = 0; ad9389b_check_edid_status()
798 ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf) ad9389b_dbg_dump_edid() argument
H A Dadv7511.c1031 static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, uint8_t *buf) adv7511_dbg_dump_edid() argument
1035 v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment); adv7511_dbg_dump_edid()
1058 ed.segment = adv7511_rd(sd, 0xc4); adv7511_notify_no_edid()
1206 static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment) edid_verify_crc() argument
1212 if (!edid_block_verify_crc(&data[segment * 256])) edid_verify_crc()
1214 if ((segment + 1) * 2 <= blocks) edid_verify_crc()
1215 return edid_block_verify_crc(&data[segment * 256 + 128]); edid_verify_crc()
1219 static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment) edid_verify_header() argument
1227 if (segment != 0) edid_verify_header()
1244 int segment = adv7511_rd(sd, 0xc4); adv7511_check_edid_status() local
1247 if (segment >= EDID_MAX_SEGM) { adv7511_check_edid_status()
1248 v4l2_err(sd, "edid segment number too big\n"); adv7511_check_edid_status()
1251 v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment); adv7511_check_edid_status()
1252 adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]); adv7511_check_edid_status()
1253 adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]); adv7511_check_edid_status()
1254 if (segment == 0) { adv7511_check_edid_status()
1258 if (!edid_verify_crc(sd, segment) || adv7511_check_edid_status()
1259 !edid_verify_header(sd, segment)) { adv7511_check_edid_status()
1260 /* edid crc error, force reread of edid segment */ adv7511_check_edid_status()
1267 /* one more segment read ok */ adv7511_check_edid_status()
1268 state->edid.segments = segment + 1; adv7511_check_edid_status()
1271 /* Request next EDID segment */ adv7511_check_edid_status()
1272 v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments); adv7511_check_edid_status()
1280 v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments); adv7511_check_edid_status()
1284 but report only for segment 0 adv7511_check_edid_status()
1287 ed.segment = 0; adv7511_check_edid_status()
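Both adv7511 and ad9389b read the EDID one 256-byte segment (two 128-byte blocks) at a time and force a re-read of the segment when verification fails. The rule edid_block_verify_crc() enforces is that the bytes of each 128-byte EDID block sum to zero modulo 256; a minimal stand-alone version of that check (the sample data below is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* An EDID block is valid when the sum of its 128 bytes is 0 (mod 256);
 * the final byte is a checksum chosen to make that true. */
static int edid_block_ok(const uint8_t block[128])
{
    uint8_t sum = 0;
    for (int i = 0; i < 128; i++)
        sum += block[i];
    return sum == 0;
}

int main(void)
{
    uint8_t block[128];

    memset(block, 0x11, 127);
    block[127] = (uint8_t)(0x100 - ((0x11 * 127) & 0xff)); /* fix up checksum */
    printf("block %s\n", edid_block_ok(block) ? "ok" : "corrupt");
    return 0;
}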
/linux-4.1.27/drivers/acpi/acpica/
H A Dnsaccess.c371 * optional scope prefix followed by a name segment part. acpi_ns_lookup()
385 /* Point to name segment part */ acpi_ns_lookup()
413 * Point past this prefix to the name segment acpi_ns_lookup()
445 * The segment part consists of either: acpi_ns_lookup()
446 * - A Null name segment (0) acpi_ns_lookup()
450 * - A single 4-byte name segment acpi_ns_lookup()
475 /* Two segments, point to first name segment */ acpi_ns_lookup()
491 /* Extract segment count, point to first name segment */ acpi_ns_lookup()
505 * only one name segment and Pathname is already pointing to it. acpi_ns_lookup()
510 "Simple Pathname (1 segment, Flags=%X)\n", acpi_ns_lookup()
519 * Search namespace for each segment of the name. Loop through and acpi_ns_lookup()
520 * verify (or add to the namespace) each name segment. acpi_ns_lookup()
523 * segment. (We don't care about the types along the path, only acpi_ns_lookup()
532 /* This is the last segment, enable typechecking */ acpi_ns_lookup()
603 /* Special handling for the last segment (num_segments == 0) */ acpi_ns_lookup()
609 * If 1) This is the last segment (num_segments == 0) acpi_ns_lookup()
638 * If this is the last name segment and we are not looking for a acpi_ns_lookup()
647 /* Point to next name segment and make this node current */ acpi_ns_lookup()
H A Dexnames.c65 * RETURN: A pointer to the allocated string segment. This segment must
136 * append the segment(s) acpi_ex_allocate_name_string()
168 * valid name segment acpi_ex_name_segment()
186 /* Valid name segment */ acpi_ex_name_segment()
315 /* Examine first character of name for name segment prefix operator */ acpi_ex_get_name_string()
401 /* Name segment string */ acpi_ex_get_name_string()
H A Dnsutils.c176 * For the internal name, the required length is 4 bytes per segment, plus acpi_ns_get_internal_name_length()
177 * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null acpi_ns_get_internal_name_length()
202 * path separators within the string. Start with one segment since the acpi_ns_get_internal_name_length()
203 * segment count is [(# separators) + 1], and zero separators is ok. acpi_ns_get_internal_name_length()
289 /* Pad the segment with underscore(s) if segment is short */ acpi_ns_build_internal_name()
308 /* Move on the next segment */ acpi_ns_build_internal_name()
363 /* We need a segment to store the internal name */ acpi_ns_internalize_name()
524 /* Copy and validate the 4-char name segment */ acpi_ns_externalize_name()
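acpi_ns_build_internal_name() stores every name segment as exactly four bytes, padding short segments with trailing underscores, so an external segment such as "_SB" is held internally as "_SB_". A small sketch of just that padding step; acpi_pad_segment is an illustrative helper, not an ACPICA function:

#include <stdio.h>
#include <string.h>

/* Pad a single ACPI name segment to the fixed 4-byte internal form,
 * appending '_' for any missing characters (segments longer than four
 * characters are not valid ACPI names and are simply truncated here). */
static void acpi_pad_segment(const char *ext, char out[5])
{
    size_t len = strlen(ext);

    for (size_t i = 0; i < 4; i++)
        out[i] = (i < len) ? ext[i] : '_';
    out[4] = '\0';
}

int main(void)
{
    char seg[5];

    acpi_pad_segment("_SB", seg);   /* -> "_SB_" */
    printf("%s\n", seg);
    acpi_pad_segment("PCI0", seg);  /* already 4 chars -> "PCI0" */
    printf("%s\n", seg);
    return 0;
}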
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_hmc.h118 * @sd_index: segment descriptor index
139 * @sd_index: segment descriptor index
158 * @sd_idx: segment descriptor index
167 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
172 * @sd_idx: pointer to return index of the segment descriptor in question
173 * @sd_limit: pointer to return the maximum number of segment descriptors
175 * This function calculates the segment descriptor index and index limit
H A Di40e_hmc.c35 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
38 * @sd_index: segment descriptor index to manipulate
39 * @type: what type of segment descriptor we're manipulating
279 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
310 * @idx: segment descriptor index to find the relevant page descriptor
337 * @idx: segment descriptor index to find the relevant page descriptor
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
H A Di40e_hmc.h118 * @sd_index: segment descriptor index
139 * @sd_index: segment descriptor index
158 * @sd_idx: segment descriptor index
167 * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
172 * @sd_idx: pointer to return index of the segment descriptor in question
173 * @sd_limit: pointer to return the maximum number of segment descriptors
175 * This function calculates the segment descriptor index and index limit
/linux-4.1.27/drivers/misc/eeprom/
H A Dat25.c179 unsigned segment; at25_ee_write() local
211 segment = buf_size - (offset % buf_size); at25_ee_write()
212 if (segment > count) at25_ee_write()
213 segment = count; at25_ee_write()
214 memcpy(cp, buf, segment); at25_ee_write()
216 segment + at25->addrlen + 1); at25_ee_write()
219 segment, offset, (int) status); at25_ee_write()
248 segment, offset, at25_ee_write()
255 off += segment; at25_ee_write()
256 buf += segment; at25_ee_write()
257 count -= segment; at25_ee_write()
258 written += segment; at25_ee_write()
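at25_ee_write() never lets a single SPI write cross a page boundary: each segment is the distance from the current offset to the end of its page, clamped to the bytes still to be written. A stand-alone sketch of that loop, where buf_size plays the role of the device's write page size and the concrete numbers are assumptions:

#include <stdio.h>

int main(void)
{
    const unsigned buf_size = 32;  /* device write page size (assumption) */
    unsigned offset = 50;          /* starting EEPROM address */
    unsigned count = 100;          /* bytes left to write */

    while (count) {
        /* Bytes left in the current page, clamped to what remains. */
        unsigned segment = buf_size - (offset % buf_size);
        if (segment > count)
            segment = count;

        printf("write %u bytes at offset %u\n", segment, offset);
        offset += segment;
        count  -= segment;
    }
    return 0;
}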
/linux-4.1.27/arch/x86/include/uapi/asm/
H A De820.h58 __u64 addr; /* start of memory segment */
59 __u64 size; /* size of memory segment */
60 __u32 type; /* type of memory segment */
H A Dmce.h20 __u8 cs; /* code segment */
/linux-4.1.27/arch/mips/include/asm/
H A Dbootinfo.h101 phys_addr_t addr; /* start of memory segment */
102 phys_addr_t size; /* size of memory segment */
103 long type; /* type of memory segment */
H A Daddrspace.h46 * Returns the kernel segment base of a given address
86 * Map an address to a certain kernel segment
/linux-4.1.27/arch/mips/include/asm/netlogic/
H A Dpsb-bootinfo.h100 uint64_t addr; /* start of memory segment */
101 uint64_t size; /* size of memory segment */
102 uint32_t type; /* type of memory segment */
/linux-4.1.27/include/uapi/linux/can/
H A Dnetlink.h34 __u32 prop_seg; /* Propagation segment in TQs */
35 __u32 phase_seg1; /* Phase buffer segment 1 in TQs */
36 __u32 phase_seg2; /* Phase buffer segment 2 in TQs */
/linux-4.1.27/arch/mips/kernel/
H A Dvmlinux.lds.S36 /* Read-only sections, merged into text segment: */
42 /* Set the vaddr for the text segment to a value
100 we can shorten the on-disk segment size. */
/linux-4.1.27/arch/powerpc/mm/
H A Dmmu_context_hash32.c34 * (virtual segment identifiers) for each context. Although the
50 * segment IDs). We use a skew on both the context and the high 4 bits
51 * of the 32-bit virtual address (the "effective segment ID") in order
H A Dhash_low_64.S52 * Adds a 4K page to the hash table in a segment of 4K pages only
112 cmpdi r9,0 /* check segment size */
135 * store it in r28 for 1T segment
204 ld r10,STK_PARAM(R9)(r1) /* segment size */
229 ld r10,STK_PARAM(R9)(r1) /* segment size */
301 ld r8,STK_PARAM(R9)(r1) /* segment size */
337 * 64K SW & 4K or 64K HW in a 4K segment pages implementation *
413 cmpdi r9,0 /* check segment size */
444 * store it in r28 for 1T segment
533 ld r10,STK_PARAM(R9)(r1) /* segment size */
562 ld r10,STK_PARAM(R9)(r1) /* segment size */
590 * useless now that the segment has been switched to 4k pages.
669 ld r8,STK_PARAM(R9)(r1) /* segment size */
708 * 64K SW & 64K HW in a 64K segment pages implementation *
774 cmpdi r9,0 /* check segment size */
796 * store it in r28 for 1T segment
868 ld r10,STK_PARAM(R9)(r1) /* segment size */
893 ld r10,STK_PARAM(R9)(r1) /* segment size */
965 ld r8,STK_PARAM(R9)(r1) /* segment size */
H A Dslb.c122 /* Slot 1 - first VMALLOC segment */ __slb_flush_and_rebolt()
159 * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
168 /* System is not 1T segment size capable. */ esids_match()
187 /* Flush all user entries from the segment table of the current processor. */ switch_slb()
231 * 0x10000000 so it makes sense to preload this segment. switch_slb()
321 * which is in the first segment of the linear mapping, and also slb_initialize()
/linux-4.1.27/arch/ia64/include/asm/
H A Dpci.h15 __u16 segment; /* PCI Segment number */ member in struct:pci_vector_struct
94 int segment; member in struct:pci_controller
102 #define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
H A Duaccess.h66 #define __access_ok(addr, size, segment) \
69 (likely((unsigned long) (addr) <= (segment).seg) \
70 && ((segment).seg == KERNEL_DS.seg \
187 #define __do_get_user(check, x, ptr, size, segment) \
193 if (!check || __access_ok(__gu_ptr, size, segment)) \
206 #define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment)
214 #define __do_put_user(check, x, ptr, size, segment) \
221 if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \
233 #define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment)
/linux-4.1.27/drivers/scsi/qla2xxx/
H A Dqla_fw.h401 uint16_t wr_dseg_count; /* Write Data segment count. */
402 uint16_t rd_dseg_count; /* Read Data segment count. */
411 uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
412 uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
422 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
423 uint16_t fcp_data_dseg_len; /* Data segment length. */
438 uint16_t dseg_count; /* Data segment count. */
450 uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
451 uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
453 uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
460 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
461 uint32_t fcp_data_dseg_len; /* Data segment length. */
477 uint16_t dseg_count; /* Data segment count. */
507 uint32_t dseg_0_address[2]; /* Data segment 0 address. */
508 uint32_t dseg_0_len; /* Data segment 0 length. */
524 uint16_t dseg_count; /* Data segment count. */
532 uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
533 uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
535 uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
542 uint32_t crc_context_address[2]; /* Data segment address. */
543 uint16_t crc_context_len; /* Data segment length. */
663 uint32_t dseg_0_address[2]; /* Data segment 0 address. */
664 uint32_t dseg_0_len; /* Data segment 0 length. */
665 uint32_t dseg_1_address[2]; /* Data segment 1 address. */
666 uint32_t dseg_1_len; /* Data segment 1 length. */
715 uint32_t tx_address[2]; /* Data segment 0 address. */
716 uint32_t tx_len; /* Data segment 0 length. */
717 uint32_t rx_address[2]; /* Data segment 1 address. */
718 uint32_t rx_len; /* Data segment 1 length. */
1313 uint32_t tx_address[2]; /* Data segment 0 address. */
1314 uint32_t tx_len; /* Data segment 0 length. */
1315 uint32_t rx_address[2]; /* Data segment 1 address. */
1316 uint32_t rx_len; /* Data segment 1 length. */
H A Dqla_mr.h32 __le16 dseg_count; /* Data segment count. */
49 uint32_t dseg_0_address[2]; /* Data segment 0 address. */
50 uint32_t dseg_0_len; /* Data segment 0 length. */
179 __le32 dseg_rq_address[2]; /* Data segment 0 address. */
180 __le32 dseg_rq_len; /* Data segment 0 length. */
181 __le32 dseg_rsp_address[2]; /* Data segment 1 address. */
182 __le32 dseg_rsp_len; /* Data segment 1 length. */
H A Dqla_def.h1554 uint16_t dseg_count; /* Data segment count. */
1557 uint32_t dseg_0_address; /* Data segment 0 address. */
1558 uint32_t dseg_0_length; /* Data segment 0 length. */
1559 uint32_t dseg_1_address; /* Data segment 1 address. */
1560 uint32_t dseg_1_length; /* Data segment 1 length. */
1561 uint32_t dseg_2_address; /* Data segment 2 address. */
1562 uint32_t dseg_2_length; /* Data segment 2 length. */
1580 uint16_t dseg_count; /* Data segment count. */
1583 uint32_t dseg_0_address[2]; /* Data segment 0 address. */
1584 uint32_t dseg_0_length; /* Data segment 0 length. */
1585 uint32_t dseg_1_address[2]; /* Data segment 1 address. */
1586 uint32_t dseg_1_length; /* Data segment 1 length. */
1599 uint32_t dseg_0_address; /* Data segment 0 address. */
1600 uint32_t dseg_0_length; /* Data segment 0 length. */
1601 uint32_t dseg_1_address; /* Data segment 1 address. */
1602 uint32_t dseg_1_length; /* Data segment 1 length. */
1603 uint32_t dseg_2_address; /* Data segment 2 address. */
1604 uint32_t dseg_2_length; /* Data segment 2 length. */
1605 uint32_t dseg_3_address; /* Data segment 3 address. */
1606 uint32_t dseg_3_length; /* Data segment 3 length. */
1607 uint32_t dseg_4_address; /* Data segment 4 address. */
1608 uint32_t dseg_4_length; /* Data segment 4 length. */
1609 uint32_t dseg_5_address; /* Data segment 5 address. */
1610 uint32_t dseg_5_length; /* Data segment 5 length. */
1611 uint32_t dseg_6_address; /* Data segment 6 address. */
1612 uint32_t dseg_6_length; /* Data segment 6 length. */
1624 uint32_t dseg_0_address[2]; /* Data segment 0 address. */
1625 uint32_t dseg_0_length; /* Data segment 0 length. */
1626 uint32_t dseg_1_address[2]; /* Data segment 1 address. */
1627 uint32_t dseg_1_length; /* Data segment 1 length. */
1628 uint32_t dseg_2_address [2]; /* Data segment 2 address. */
1629 uint32_t dseg_2_length; /* Data segment 2 length. */
1630 uint32_t dseg_3_address[2]; /* Data segment 3 address. */
1631 uint32_t dseg_3_length; /* Data segment 3 length. */
1632 uint32_t dseg_4_address[2]; /* Data segment 4 address. */
1633 uint32_t dseg_4_length; /* Data segment 4 length. */
1686 __le16 dseg_count; /* Data segment count */
1691 uint32_t dif_length; /* Data segment 0
1883 uint32_t dseg_req_address[2]; /* Data segment 0 address. */
1884 uint32_t dseg_req_length; /* Data segment 0 length. */
1885 uint32_t dseg_rsp_address[2]; /* Data segment 1 address. */
1886 uint32_t dseg_rsp_length; /* Data segment 1 length. */
H A Dqla_target.h287 uint16_t dseg_count; /* Data segment count. */
293 uint32_t dseg_0_address; /* Data segment 0 address. */
294 uint32_t dseg_0_length; /* Data segment 0 length. */
295 uint32_t dseg_1_address; /* Data segment 1 address. */
296 uint32_t dseg_1_length; /* Data segment 1 length. */
297 uint32_t dseg_2_address; /* Data segment 2 address. */
298 uint32_t dseg_2_length; /* Data segment 2 length. */
453 uint16_t dseg_count; /* Data segment count. */
470 /* Data segment 0 address. */
472 /* Data segment 0 length. */
500 uint16_t dseg_count; /* Data segment count. */
550 uint16_t dseg_count; /* Data segment count. */
567 __le32 crc_context_address[2];/* Data segment address. */
568 uint16_t crc_context_len; /* Data segment length. */
582 uint16_t dseg_count; /* Data segment count. */
/linux-4.1.27/arch/x86/um/asm/
H A Dprocessor_32.h10 #include <asm/segment.h>
H A Dbarrier.h5 #include <asm/segment.h>
/linux-4.1.27/arch/sparc/include/uapi/asm/
H A Dmman.h14 #define MAP_GROWSDOWN 0x0200 /* stack-like segment */
H A Dshmbuf.h28 size_t shm_segsz; /* size of segment (bytes) */
H A Ddisplay7seg.h3 * display7seg - Driver interface for the 7-segment display
/linux-4.1.27/arch/unicore32/kernel/
H A Dksyms.c43 /* user mem (segment) */
H A Dvmlinux.lds.S37 .text : { /* Real text segment */
/linux-4.1.27/fs/logfs/
H A Dgc.c21 * but just gently pick one segment every so often and minimize overhead.
26 #define SCAN_RATIO 512 /* number of scanned segments per gc'd segment */
58 printk(KERN_ERR"LOGFS: segment of unknown level %x found\n", root_distance()
93 * Returns the bytes consumed by valid objects in this segment. Object headers
94 * are counted, the segment header is not.
266 * about that segment for a while. We have better candidates for each purpose.
275 log_gc_noisy("add reserve segment %x (ec %x) at %llx\n", __add_candidate()
280 log_gc_noisy("add free segment %x (ec %x) at %llx\n", __add_candidate()
356 * Find the best segment for garbage collection. Main criterion is
357 * the segment requiring the least effort to clean. Secondary
360 * So we search the least effort segment on the lowest level first,
 361 then move up and pick another segment iff it requires significantly
400 log_gc("GC segment #%02x at %llx, %x required, %x free, %x valid, %llx free\n", __logfs_gc_once()
405 log_gc("GC segment #%02x complete - now %x valid\n", segno, __logfs_gc_once()
435 * block from the segment size on next invocation if logfs_scan_some()
652 * written out correctly, we must GC this segment. So assume the check_area()
H A Dlogfs_abi.h151 * LOGFS_SEGMENT_RESERVE is the amount of space reserved for each segment for
152 * its segment header and the padded space at the end when no further objects
174 * struct logfs_segment_header - per-segment header in the ostore
178 * @type: segment type, see above
179 * @level: GC level for all objects in this segment
180 * @segno: segment number
181 * @ec: erase count for this segment
213 * @ds_segment_shift: log2 of segment size
300 * LOGFS_INO_SEGFILE - per-segment used bytes and erase count
399 * struct logfs_segment_entry - segment file entry
404 * Segment file contains one entry for every segment. ec_level contains the
407 * of valid bytes or RESERVED (-1 again) if the segment is used for either the
408 * superblock or the journal, or when the segment is bad.
442 * VIM_SEGFILE - for segment file only - very short-living
453 * @segno: segment number of area
562 * struct logfs_seg_alias - list of segment aliases
H A Dlogfs.h108 * @a_segno: segment number of area
115 struct logfs_area { /* a segment open for writing */
129 * @get_free_segment: fill area->ofs with the offset of a free segment
131 * @erase_segment: erase and setup segment
144 * @writeseg: write one segment. may be a partial segment
145 * @erase: erase one segment
174 * struct gc_candidate - "candidate" segment to be garbage collected next
177 * @segno: segment number
179 * @erase_count: erase count of segment
328 struct inode *s_segfile_inode; /* segment file */
333 mempool_t *s_alias_pool; /* aliases in segment.c */
352 long s_segsize; /* size of a segment */
353 int s_segshift; /* log2 of segment size */
357 long s_no_blocks; /* blocks per segment */
361 struct logfs_area *s_area[LOGFS_NO_AREAS]; /* open segment array */
372 struct candidate_list s_reserve_list; /* Bad segment reserve */
388 struct logfs_area *s_journal_area; /* open journal segment */
434 * free. When encountering bad blocks, the affected segment's data
435 * is _temporarily_ moved to a reserved segment.
584 /* segment.c */
/linux-4.1.27/include/media/
H A Dadv7511.h34 int segment; member in struct:adv7511_edid_detect
H A Dad9389b.h46 int segment; member in struct:ad9389b_edid_detect
/linux-4.1.27/arch/s390/include/uapi/asm/
H A Dshmbuf.h16 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/mips/include/uapi/asm/
H A Dshmbuf.h15 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/mn10300/include/uapi/asm/
H A Dshmbuf.h16 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/parisc/include/uapi/asm/
H A Dshmbuf.h33 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/powerpc/include/uapi/asm/
H A Dmman.h19 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */
H A Dshmbuf.h39 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/avr32/include/uapi/asm/
H A Dshmbuf.h16 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/cris/include/uapi/asm/
H A Dshmbuf.h16 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/cris/kernel/
H A Dsys_cris.c27 #include <asm/segment.h>
/linux-4.1.27/arch/frv/include/uapi/asm/
H A Dshmbuf.h16 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/m32r/include/uapi/asm/
H A Dshmbuf.h16 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/arch/alpha/include/uapi/asm/
H A Dshmbuf.h15 size_t shm_segsz; /* size of segment (bytes) */
/linux-4.1.27/drivers/lguest/
H A Dsegments.c10 * In these modern times, the segment handling code consists of simple sanity
24 * set up, these segments can be loaded into one of the 6 "segment registers".
30 * Anyway, the GDT entry contains a base (the start address of the segment), a
31 * limit (the size of the segment - 1), and some flags. Sounds simple, and it
63 * Protection Fault in the Switcher when it restores a Guest segment register
90 * that entry into a segment register. But the GDT isn't fixup_gdt_table()
116 * The TSS segment refers to the TSS entry for this particular CPU. setup_default_gdt_entries()
128 gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */ setup_default_gdt_entries()
/linux-4.1.27/arch/sh/kernel/vsyscall/
H A Dvsyscall.lds.S4 * segment (that fits in one page). This script controls its layout.
61 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/s390/kernel/vdso32/
H A Dvdso32.lds.S34 * Other stuff is appended to the text segment:
112 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/s390/kernel/vdso64/
H A Dvdso64.lds.S34 * Other stuff is appended to the text segment:
112 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/mips/lib/
H A Duncached.c28 * segment used is CKSEG1.
29 * 2. If the original address is in XKPHYS, then the uncached segment
/linux-4.1.27/drivers/infiniband/core/
H A Dsmi.c78 /* C14-9:3 -- We're at the end of the DR segment of path */ smi_handle_dr_smp_send()
109 /* C14-13:3 -- at the end of the DR segment of path */ smi_handle_dr_smp_send()
160 /* C14-9:3 -- We're at the end of the DR segment of path */ smi_handle_dr_smp_recv()
194 /* C14-13:3 -- We're at the end of the DR segment of path */ smi_handle_dr_smp_recv()
224 /* C14-9:3 -- at the end of the DR segment of path */ smi_check_forward_dr_smp()
237 /* C14-13:3 -- at the end of the DR segment of path */ smi_check_forward_dr_smp()
H A Dsmi.h68 /* C14-9:3 -- We're at the end of the DR segment of path */ smi_check_local_smp()
83 /* C14-13:3 -- We're at the end of the DR segment of path */ smi_check_local_returning_smp()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
H A Dbase.c58 /* acquire data segment access */ nvkm_pmu_send()
72 /* release data segment access */ nvkm_pmu_send()
97 /* acquire data segment access */ nvkm_pmu_recv()
111 /* release data segment access */ nvkm_pmu_recv()
207 /* upload data segment */ _nvkm_pmu_init()
212 /* upload code segment */ _nvkm_pmu_init()
/linux-4.1.27/tools/testing/selftests/x86/
H A Dsigreturn.c90 * - code16_sel: A 16-bit LDT code segment pointing to int3.
91 * - data16_sel: A 16-bit LDT data segment pointing to stack16.
92 * - npcode32_sel: A 32-bit not-present LDT code segment pointing to int3.
93 * - npdata32_sel: A 32-bit not-present LDT data segment pointing to stack16.
94 * - gdt_data16_idx: A 16-bit GDT data segment pointing to stack16.
95 * - gdt_npdata32_idx: A 32-bit not-present GDT data segment pointing to
149 printf("[NOTE]\tFailed to create %s segment\n", name); add_ldt()
366 * a usable code segment selector.
397 /* Finds a usable code segment of the requested bitness. */ find_cs()
421 printf("[SKIP]\tCode segment unavailable for %d-bit CS, %d-bit SS\n", test_valid_sigreturn()
431 printf("[SKIP]\tData segment unavailable for %d-bit CS, 16-bit SS\n", test_valid_sigreturn()
653 /* These fail because SS isn't a data segment, resulting in #GP(SS) */ main()
658 /* Try to return to a not-present code segment, triggering #NP(SS). */ main()
662 * Try to return to a not-present but otherwise valid data segment. main()
676 * segment without invoking espfix. Newer kernels don't allow main()
/linux-4.1.27/arch/powerpc/include/asm/
H A Duaccess.h23 * The fs/ds values are now the highest legal address in the "segment".
50 #define __access_ok(addr, size, segment) \
51 (((addr) <= (segment).seg) && ((size) <= (segment).seg))
55 #define __access_ok(addr, size, segment) \
56 (((addr) <= (segment).seg) && \
57 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
H A Dpte-hash32.h7 * table containing PTEs, together with a set of 16 segment registers,
/linux-4.1.27/drivers/acpi/
H A Dpci_irq.c102 unsigned int segment; member in struct:prt_quirk
114 * interrupt at the listed segment/bus/device/pin is connected to the first
143 entry->id.segment == quirk->segment && do_prt_fixups()
152 entry->id.segment, entry->id.bus, do_prt_fixups()
164 int segment = pci_domain_nr(dev->bus); acpi_pci_irq_check_entry() local
182 entry->id.segment = segment; acpi_pci_irq_check_entry()
217 entry->id.segment, entry->id.bus, acpi_pci_irq_check_entry()
/linux-4.1.27/fs/jfs/
H A Djfs_dtree.h38 * entry segment/slot
40 * an entry consists of type dependent head/only segment/slot and
42 * N.B. last/only segment of entry is terminated by next = -1;
63 * internal node entry head/only segment
81 * leaf node entry head/only segment
/linux-4.1.27/arch/microblaze/include/asm/
H A Dmmu_context_mm.h22 * segment IDs). We use a skew on both the context and the high 4 bits
23 * of the 32-bit virtual address (the "effective segment ID") in order
 46 * This is done by loading up the segment registers for the user part of the
/linux-4.1.27/arch/m68k/include/asm/
H A Duser.h65 unsigned long int u_tsize; /* Text segment size (pages). */
66 unsigned long int u_dsize; /* Data segment size (pages). */
67 unsigned long int u_ssize; /* Stack segment size (pages). */
H A Dthread_info.h6 #include <asm/segment.h>
/linux-4.1.27/arch/arm/include/asm/
H A Duser.h60 unsigned long int u_tsize; /* Text segment size (pages). */
61 unsigned long int u_dsize; /* Data segment size (pages). */
62 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/drivers/media/usb/cpia2/
H A Dcpia2.h104 #define JPEG_MARKER_COM (1<<6) /* Comment segment */
415 int APPn; /* Number of APP segment to be written, must be 0..15 */
416 int APP_len; /* Length of data in JPEG APPn segment */
417 char APP_data[60]; /* Data in the JPEG APPn segment. */
419 int COM_len; /* Length of data in JPEG COM segment */
420 char COM_data[60]; /* Data in JPEG COM segment */
/linux-4.1.27/arch/m68k/sun3/
H A Dmmu_emu.c25 #include <asm/segment.h>
356 unsigned long segment, offset; mmu_emu_handle_fault() local
377 segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF; mmu_emu_handle_fault()
381 printk ("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment, offset); mmu_emu_handle_fault()
384 pte = (pte_t *) pgd_val (*(crp + segment)); mmu_emu_handle_fault()
H A Dsun3ints.c14 #include <asm/segment.h>
/linux-4.1.27/arch/alpha/include/asm/
H A Duaccess.h42 #define __access_ok(addr, size, segment) \
43 (((segment).seg & (addr | size | (addr+size))) == 0)
103 #define __get_user_check(x, ptr, size, segment) \
108 if (__access_ok((unsigned long)__gu_addr, size, segment)) { \
218 #define __put_user_check(x, ptr, size, segment) \
222 if (__access_ok((unsigned long)__pu_addr, size, segment)) { \
/linux-4.1.27/sound/core/oss/
H A Dmulaw.c32 #define SEG_SHIFT (4) /* Left shift for segment number. */
72 * Each biased linear code has a leading 1 which identifies the segment
73 * number. The value of the segment number is equal to 7 minus the number
100 /* Convert the scaled magnitude to segment number. */ linear2ulaw()
104 * Combine the sign, segment, quantization bits; linear2ulaw()
129 * shift up by the segment number and subtract out the bias. ulaw2linear()
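The comments above describe the mu-law segment scheme: the biased magnitude's leading one determines a 3-bit segment number, which is combined with the sign and four quantization bits and then complemented. A stand-alone version of the textbook G.711-style encoder, not necessarily the exact table-driven code in mulaw.c:

#include <stdint.h>
#include <stdio.h>

#define BIAS 0x84   /* bias added before locating the segment */
#define CLIP 32635

/* Encode a signed 16-bit sample to 8-bit mu-law: sign bit, 3-bit segment
 * number, 4 quantization bits, all complemented on output. */
static uint8_t linear2ulaw(int16_t pcm)
{
    int sign = (pcm < 0) ? 0x80 : 0x00;
    int mag  = (pcm < 0) ? -(int)pcm : pcm;
    int seg;

    if (mag > CLIP)
        mag = CLIP;
    mag += BIAS;

    /* Segment number: how far the magnitude's leading one sits above bit 7. */
    for (seg = 0; seg < 8 && mag >= (0x100 << seg); seg++)
        ;

    return (uint8_t)~(sign | (seg << 4) | ((mag >> (seg + 3)) & 0x0F));
}

int main(void)
{
    printf("0      -> 0x%02x\n", linear2ulaw(0));       /* silence is 0xFF */
    printf("+16000 -> 0x%02x\n", linear2ulaw(16000));
    printf("-16000 -> 0x%02x\n", linear2ulaw(-16000));
    return 0;
}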
/linux-4.1.27/drivers/staging/rtl8188eu/include/
H A Drtw_ioctl.h82 int query_counter; /* count the number of query hits for this segment */
83 int set_counter; /* count the number of set hits for this segment */
/linux-4.1.27/arch/metag/kernel/
H A Duser_gateway.S15 * These are segment of kernel provided user code reachable from user space
25 * Each segment is 64-byte aligned. This mechanism should be used only for
/linux-4.1.27/arch/powerpc/boot/
H A Delf.h21 /* These constants are for the segment types stored in the image headers */
29 #define PT_TLS 7 /* Thread local storage segment */
/linux-4.1.27/arch/powerpc/kernel/vdso32/
H A Dvdso32.lds.S52 * Other stuff is appended to the text segment:
123 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/powerpc/kernel/vdso64/
H A Dvdso64.lds.S51 * Other stuff is appended to the text segment:
123 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/fs/cachefiles/
H A Dkey.c61 max += 3 * 2; /* maximum number of segment dividers (".../M") cachefiles_cook_key()
71 max += 3 * 2; /* maximum number of segment dividers (".../M") cachefiles_cook_key()
/linux-4.1.27/fs/proc/
H A Dvmcore.c214 /* Read Elf note segment */ __read_vmcore()
303 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
326 * non-contiguous objects (ELF header, ELF note segment and memory
522 * note segment.
571 * headers and sum of real size of their ELF note segment headers and
584 * and each of PT_NOTE program headers has actual ELF note segment
612 * This function is used to copy ELF note segment in the 1st kernel
615 * real ELF note segment headers and data.
619 * and each of PT_NOTE program headers has actual ELF note segment
708 * note segment.
757 * headers and sum of real size of their ELF note segment headers and
770 * and each of PT_NOTE program headers has actual ELF note segment
798 * This function is used to copy ELF note segment in the 1st kernel
801 * real ELF note segment headers and data.
805 * and each of PT_NOTE program headers has actual ELF note segment
903 /* Skip Elf header, program headers and Elf note segment. */ process_ptload_program_headers_elf64()
946 /* Skip Elf header, program headers and Elf note segment. */ process_ptload_program_headers_elf32()
982 /* Skip Elf header, program headers and Elf note segment. */ set_vmcore_list_offsets()
/linux-4.1.27/drivers/usb/wusbcore/
H A Dwa-xfer.c23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
28 * For each submitted segment request, a notification will come over
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
93 /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
123 u8 index; /* which segment we are */
124 int isoc_frame_count; /* number of isoc frames in this segment. */
395 * Mark the given segment as done. Return true if this completes the xfer.
548 * that will fit a in transfer segment.
563 * out segment if it is physically contiguous with the previous __wa_seg_calculate_isoc_frame_count()
590 * @returns < 0 on error, transfer segment request size if ok
626 /* Compute the segment size and make sure it is a multiple of __wa_xfer_setup_sizes()
735 * Callback for the OUT data phase of the segment request
742 * wa_seg_tr_cb() has already failed the segment and moved the
776 * if this is the last isoc frame of the segment, we wa_seg_dto_cb()
872 * Callback for the isoc packet descriptor phase of the segment request
879 * wa_seg_tr_cb() has already failed the segment and moved the
939 * Callback for the segment request
943 * segment done and try completion.
1034 /* advance the sg if current segment starts on or past the wa_xfer_create_subset_sg()
1040 /* the data for the current segment starts in current_xfer_sg. wa_xfer_create_subset_sg()
1047 /* calculate the number of pages needed by this segment. */ wa_xfer_create_subset_sg()
1058 * data to be transferred by this segment to the segment SG. */ wa_xfer_create_subset_sg()
1169 * drops to zero; however, because each segment is given the same life
1194 * Adjust the size of the segment object to contain space for __wa_xfer_setup_segs()
1232 * segment object memory buffer. __wa_xfer_setup_segs()
1262 * segment will be filled in and sent from the __wa_xfer_setup_segs()
1283 * Free the memory for the current segment which failed to init. __wa_xfer_setup_segs()
1377 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ __wa_xfer_setup()
1394 /* default to done unless we encounter a multi-frame isoc segment. */ __wa_seg_submit()
1398 * Take a ref for each segment urb so the xfer cannot disappear until __wa_seg_submit()
1435 * If this segment contains more than one isoc frame, hold __wa_seg_submit()
1582 * Only attempt to acquire DTO if we have a segment __wa_xfer_submit()
1929 * asynch request] and then make sure we cancel each segment.
2021 * The buf_in data for a segment in the wa_urb_dequeue()
2130 * If a last segment flag and/or a transfer result error is encountered,
2131 * no other segment transfer results will be returned from the device.
2319 dev_err(dev, "xfer %p#%u: Bad segment state %u\n", wa_xfer_result_chew()
2335 * If the last segment bit is set, complete the remaining segments. wa_xfer_result_chew()
2336 * When the current segment is completed, either in wa_buf_in_cb for wa_xfer_result_chew()
2424 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); wa_xfer_result_chew()
2589 * error, mark this segment done and try completion.
2755 * request accounting. If it is an IN segment, we move to RBI and post
2758 * segment, it will repost the DTI-URB.
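The header comment of wa-xfer.c explains that a transfer buffer is broken into segments whose size is a multiple of the endpoint's maxpacket size, with one segment request issued per segment. A stand-alone sketch of that sizing step; maxpacket, max_seg and the transfer length are illustrative values, not figures taken from the WUSB spec:

#include <stdio.h>

int main(void)
{
    const unsigned maxpacket = 512;       /* endpoint wMaxPacketSize (assumption) */
    const unsigned max_seg   = 16 * 1024; /* upper bound per segment (assumption) */
    unsigned total = 100000;              /* transfer length in bytes */

    /* Segment size: as large as allowed, rounded down to a whole number of
     * packets so only the final segment may be short. */
    unsigned seg_size = (max_seg / maxpacket) * maxpacket;
    unsigned nsegs = (total + seg_size - 1) / seg_size;

    printf("%u bytes -> %u segments of up to %u bytes\n",
           total, nsegs, seg_size);
    return 0;
}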
/linux-4.1.27/kernel/
H A Dkexec.c154 ret = copy_from_user(image->segment, segments, segment_bytes); copy_user_segment_list()
183 mstart = image->segment[i].mem; sanity_check_segment_list()
184 mend = mstart + image->segment[i].memsz; sanity_check_segment_list()
194 * easy explanation as one segment stops on another. sanity_check_segment_list()
201 mstart = image->segment[i].mem; sanity_check_segment_list()
202 mend = mstart + image->segment[i].memsz; sanity_check_segment_list()
205 pstart = image->segment[j].mem; sanity_check_segment_list()
206 pend = pstart + image->segment[j].memsz; sanity_check_segment_list()
220 if (image->segment[i].bufsz > image->segment[i].memsz) sanity_check_segment_list()
239 mstart = image->segment[i].mem; sanity_check_segment_list()
240 mend = mstart + image->segment[i].memsz - 1; sanity_check_segment_list()
484 * data from user space, do error checking, prepare segment list
631 mstart = image->segment[i].mem; kimage_is_destination_range()
632 mend = mstart + image->segment[i].memsz; kimage_is_destination_range()
732 * to give it an entry in image->segment[]. kimage_alloc_normal_control_pages()
787 mstart = image->segment[i].mem; kimage_alloc_crash_control_pages()
788 mend = mstart + image->segment[i].memsz - 1; kimage_alloc_crash_control_pages()
790 /* Advance the hole to the end of the segment */ kimage_alloc_crash_control_pages()
1066 struct kexec_segment *segment) kimage_load_normal_segment()
1076 kbuf = segment->kbuf; kimage_load_normal_segment()
1078 buf = segment->buf; kimage_load_normal_segment()
1079 ubytes = segment->bufsz; kimage_load_normal_segment()
1080 mbytes = segment->memsz; kimage_load_normal_segment()
1081 maddr = segment->mem; kimage_load_normal_segment()
1133 struct kexec_segment *segment) kimage_load_crash_segment()
1147 kbuf = segment->kbuf; kimage_load_crash_segment()
1149 buf = segment->buf; kimage_load_crash_segment()
1150 ubytes = segment->bufsz; kimage_load_crash_segment()
1151 mbytes = segment->memsz; kimage_load_crash_segment()
1152 maddr = segment->mem; kimage_load_crash_segment()
1197 struct kexec_segment *segment) kimage_load_segment()
1203 result = kimage_load_normal_segment(image, segment); kimage_load_segment()
1206 result = kimage_load_crash_segment(image, segment); kimage_load_segment()
1314 result = kimage_load_segment(image, &image->segment[i]); SYSCALL_DEFINE4()
1435 ksegment = &image->segment[i]; SYSCALL_DEFINE5()
1436 pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", SYSCALL_DEFINE5()
1440 ret = kimage_load_segment(image, &image->segment[i]); SYSCALL_DEFINE5()
2111 * Helper function for placing a buffer in a kexec segment. This assumes
2124 /* Currently adding segment this way is allowed only in file mode */ kexec_add_buffer()
2170 ksegment = &image->segment[image->nr_segments]; kexec_add_buffer()
2229 ksegment = &image->segment[i]; kexec_calculate_store_digests()
2330 * segment load time. __kexec_load_purgatory()
2411 /* Add buffer to segment list */ __kexec_load_purgatory()
1065 kimage_load_normal_segment(struct kimage *image, struct kexec_segment *segment) kimage_load_normal_segment() argument
1132 kimage_load_crash_segment(struct kimage *image, struct kexec_segment *segment) kimage_load_crash_segment() argument
1196 kimage_load_segment(struct kimage *image, struct kexec_segment *segment) kimage_load_segment() argument
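sanity_check_segment_list() above rejects an image whose segments overlap in memory or whose buffer size exceeds the destination size. A stand-alone sketch of those two checks over a small segment array; struct seg is a simplified stand-in for struct kexec_segment:

#include <stdio.h>
#include <stddef.h>

struct seg { unsigned long mem; size_t memsz; size_t bufsz; };

/* Return 0 if no segment overlaps another and every bufsz fits in memsz,
 * mirroring the checks done in sanity_check_segment_list(). */
static int segments_sane(const struct seg *s, int n)
{
    for (int i = 0; i < n; i++) {
        unsigned long mstart = s[i].mem;
        unsigned long mend = mstart + s[i].memsz;

        if (s[i].bufsz > s[i].memsz)
            return -1;                     /* buffer larger than its hole */
        for (int j = 0; j < i; j++) {
            unsigned long pstart = s[j].mem;
            unsigned long pend = pstart + s[j].memsz;

            if (mend > pstart && mstart < pend)
                return -1;                 /* ranges intersect */
        }
    }
    return 0;
}

int main(void)
{
    struct seg ok[]  = { { 0x1000, 0x1000, 0x800 }, { 0x3000, 0x1000, 0x1000 } };
    struct seg bad[] = { { 0x1000, 0x2000, 0x800 }, { 0x2800, 0x1000, 0x400 } };

    printf("ok:  %d\n", segments_sane(ok, 2));
    printf("bad: %d\n", segments_sane(bad, 2));
    return 0;
}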
/linux-4.1.27/arch/tile/include/hv/
H A Ddrv_xgbe_intf.h265 * @param va_F Virtual address of first segment.
266 * @param va_L Virtual address of last segment, if 2 segments.
267 * @param len_F_L Length of first segment in low 16 bits; length of last
268 * segment, if 2 segments, in high 16 bits.
281 * @param va_L Virtual address of last segment, if 2 segments. Hard wired 0.
295 * @param va_F Virtual address of first segment.
296 * @param va_L Virtual address of last segment (third segment if 3 segments,
297 * fourth segment if 4 segments).
298 * @param len_F_L Length of first segment in low 16 bits; length of last
299 * segment in high 16 bits.
300 * @param va_M0 Virtual address of "middle 0" segment; this segment is sent
302 * @param va_M1 Virtual address of "middle 1" segment; this segment is sent
304 * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle
305 * 1 segment, if 4 segments, in high 16 bits.
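The TX interface above packs two segment lengths into a single 32-bit parameter, the first segment's length in the low 16 bits and the last segment's in the high 16 bits. A trivial illustration of that packing and unpacking (the lengths are made-up Ethernet frame sizes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t len_first = 1514, len_last = 60;

    /* Low 16 bits: first segment length; high 16 bits: last segment length. */
    uint32_t len_F_L = (uint32_t)len_first | ((uint32_t)len_last << 16);

    printf("packed=0x%08x first=%u last=%u\n",
           len_F_L, len_F_L & 0xFFFF, len_F_L >> 16);
    return 0;
}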
/linux-4.1.27/drivers/media/i2c/soc_camera/
H A Dov772x.c160 #define GAM1 0x7E /* Gamma Curve 1st segment input end point */
161 #define GAM2 0x7F /* Gamma Curve 2nd segment input end point */
162 #define GAM3 0x80 /* Gamma Curve 3rd segment input end point */
163 #define GAM4 0x81 /* Gamma Curve 4th segment input end point */
164 #define GAM5 0x82 /* Gamma Curve 5th segment input end point */
165 #define GAM6 0x83 /* Gamma Curve 6th segment input end point */
166 #define GAM7 0x84 /* Gamma Curve 7th segment input end point */
167 #define GAM8 0x85 /* Gamma Curve 8th segment input end point */
168 #define GAM9 0x86 /* Gamma Curve 9th segment input end point */
169 #define GAM10 0x87 /* Gamma Curve 10th segment input end point */
170 #define GAM11 0x88 /* Gamma Curve 11th segment input end point */
171 #define GAM12 0x89 /* Gamma Curve 12th segment input end point */
172 #define GAM13 0x8A /* Gamma Curve 13th segment input end point */
173 #define GAM14 0x8B /* Gamma Curve 14th segment input end point */
174 #define GAM15 0x8C /* Gamma Curve 15th segment input end point */
175 #define SLOP 0x8D /* Gamma curve highest segment slope */
/linux-4.1.27/drivers/s390/char/
H A Dsclp_diag.h43 * @asce: region or segment table designation
H A Dtape_34xx.c36 unsigned int segment : 7; member in struct:tape_34xx_block_id
909 * is located in segment 1 of wrap 0 because this position is used tape_34xx_add_sbid()
913 if (!sbid_list || (bid.segment < 2 && bid.wrap == 0)) tape_34xx_add_sbid()
918 * acceleration uses only the segment and wrap number. So we tape_34xx_add_sbid()
919 * need only one entry for a specific wrap/segment combination. tape_34xx_add_sbid()
928 (sbid->bid.segment == bid.segment) && list_for_each()
952 sbid->bid.segment, list_for_each()
979 sbid->bid.segment, list_for_each_safe()
1003 bid->segment = 1; tape_34xx_merge_sbid()
1018 bid->segment = sbid_to_use->bid.segment;
1021 sbid_to_use->bid.segment,
/linux-4.1.27/arch/x86/kernel/acpi/
H A Dwakeup_32.S3 #include <asm/segment.h>
/linux-4.1.27/arch/x86/kernel/
H A Dtls.c36 * entry_number means "no segment at all". This never actually tls_desc_okay()
38 * this would create a 16-bit read-write segment with base and tls_desc_okay()
41 * That was close enough to "no segment at all" until we tls_desc_okay()
46 * The correct way to ask for "no segment at all" is to specify tls_desc_okay()
H A Dhead_32.S13 #include <asm/segment.h>
130 * With the kexec as boot loader, parameter segment might be loaded beyond
297 * We can't lgdt here, because lgdt itself uses a data segment, but
457 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
460 movl $(__USER_DS),%eax # DS/ES contains default USER segment
522 * segment descriptor.
586 movw %ax,2(%esp) /* clean up the segment values on some cpus */
737 * like usual segment descriptors - they consist of a 16-bit
738 * segment size, and 32-bit linear address value:
/linux-4.1.27/arch/x86/platform/efi/
H A Defi_stub_64.S10 #include <asm/segment.h>
/linux-4.1.27/arch/x86/platform/olpc/
H A Dxo1-wakeup.S3 #include <asm/segment.h>
/linux-4.1.27/arch/x86/power/
H A Dhibernate_asm_32.S10 #include <asm/segment.h>
/linux-4.1.27/arch/tile/include/uapi/asm/
H A Dmman.h25 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */
/linux-4.1.27/arch/um/kernel/
H A Duml.lds.S93 we can shorten the on-disk segment size. */
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-address.h278 #define CVMX_ADD_SEG32(segment, add) \
279 (((int32_t)segment << 31) | (int32_t)(add))
291 #define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
/linux-4.1.27/include/xen/interface/
H A Dfeatures.h19 * If set, the guest does not need to write-protect its segment descriptor
/linux-4.1.27/arch/score/kernel/
H A Dhead.S36 la r30, __bss_start /* initialize BSS segment. */
/linux-4.1.27/arch/mips/include/asm/sn/
H A Dmapped_kernel.h12 * compiled at cksseg segment (LOADADDR = 0xc001c000), and the
/linux-4.1.27/arch/mn10300/unit-asb2303/
H A Dleds.c1 /* ASB2303 peripheral 7-segment LEDs x1 support
/linux-4.1.27/arch/arm/vdso/
H A Dvdso.lds.S69 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
/linux-4.1.27/arch/arm64/kernel/
H A Darm64ksyms.c35 /* user mem (segment) */
/linux-4.1.27/arch/frv/mm/
H A Dkmap.c20 #include <asm/segment.h>
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dphysical_ops.c29 /* PHYSICAL memory registration conveys one page per chunk segment.
/linux-4.1.27/drivers/char/agp/
H A Dfrontend.c94 * Routines for managing each client's segment list -
196 /* End - Routines for managing each client's segment list */
828 struct agp_segment *segment; agpioc_reserve_wrap() local
833 segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count), agpioc_reserve_wrap()
836 if (segment == NULL) agpioc_reserve_wrap()
839 if (copy_from_user(segment, (void __user *) reserve.seg_list, agpioc_reserve_wrap()
841 kfree(segment); agpioc_reserve_wrap()
844 reserve.seg_list = segment; agpioc_reserve_wrap()
847 /* Create the client and add the segment */ agpioc_reserve_wrap()
851 kfree(segment); agpioc_reserve_wrap()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dsec_null.c227 int segment, int newsize) null_enlarge_reqbuf()
239 oldsize = req->rq_reqbuf->lm_buflens[segment]; null_enlarge_reqbuf()
240 req->rq_reqbuf->lm_buflens[segment] = newsize; null_enlarge_reqbuf()
242 req->rq_reqbuf->lm_buflens[segment] = oldsize; null_enlarge_reqbuf()
272 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize); null_enlarge_reqbuf()
225 null_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int segment, int newsize) null_enlarge_reqbuf() argument
/linux-4.1.27/arch/metag/include/asm/
H A Dtbx.h37 /* Id values in the TBI system describe a segment using an arbitrary
42 /* Extended segment identifiers use strings in the string table */
46 #define TBID_SEGTYPE_BITS 0x0F /* One of the predefined segment types */
48 #define TBID_SEGSCOPE_BITS 0x30 /* Indicates the scope of the segment */
54 #define TBID_SEGTYPE_TEXT 0x02 /* Code segment */
55 #define TBID_SEGTYPE_DATA 0x04 /* Data segment */
56 #define TBID_SEGTYPE_STACK 0x06 /* Stack segment */
57 #define TBID_SEGTYPE_HEAP 0x0A /* Heap segment */
59 #define TBID_SEGTYPE_STRING 0x0E /* String table segment */
67 /* For segment specifier a further field in two of the remaining bits
68 indicates the usefulness of the pGAddr field in the segment descriptor
75 /* The following values are common to both segment and signal Id value and
84 /* Privilege needed to access a segment is indicated by the next bit.
87 search for a segment - setting it yourself toggles the automatically
93 /* The top six bits of a signal/segment specifier identifies a thread within
99 #define TBID_THREAD_NULL (-32) /* Never matches any thread/segment id used */
111 /* Generate a segment Id given Thread, Scope, and Type */
712 /* The global memory map is described by a list of segment descriptors */
715 int Id; /* Id of the segment */
739 PTBISEG pSeg; /* Related segment structure */
778 segment */
929 /* Return pointer to segment list at current privilege level */
932 /* Search the segment list for a match given Id, pStart can be NULL */
935 /* Prepare a new segment structure using space from within another */
938 /* Prepare a new segment using any global or local heap segments available */
941 /* Insert a new segment into the segment list so __TBIFindSeg can locate it */
/linux-4.1.27/drivers/s390/block/
H A Ddcssblk.c82 * release function for segment device.
166 * get the highest address of the multi-segment block.
183 * get the lowest address of the multi-segment block.
273 * Load a segment
294 /* load the segment */ dcssblk_load_segment()
410 * of the segment. If the segment is busy, saving will
838 /* Request beyond end of DCSS segment. */ dcssblk_make_request()
1082 MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
/linux-4.1.27/fs/xfs/libxfs/
H A Dxfs_fs.h81 __s64 bmv_offset; /* file offset of segment in blocks */
83 __s64 bmv_length; /* length of segment, blocks */
100 __s64 bmv_offset; /* file offset of segment in blocks */
102 __s64 bmv_length; /* length of segment, blocks */
122 /* bmv_oflags values - returned for each non-header segment */
123 #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
124 #define BMV_OF_DELALLOC 0x2 /* segment = delayed allocation */
125 #define BMV_OF_LAST 0x4 /* segment is the last in the file */
143 * File segment locking set data type for 64 bit access.
/linux-4.1.27/net/sched/
H A Dsch_hfsc.c80 * segment are kept in order to avoid 64-bit divide operations
85 u64 sm1; /* scaled slope of the 1st segment */
86 u64 ism1; /* scaled inverse-slope of the 1st segment */
87 u64 dx; /* the x-projection of the 1st segment */
88 u64 dy; /* the y-projection of the 1st segment */
89 u64 sm2; /* scaled slope of the 2nd segment */
90 u64 ism2; /* scaled inverse-slope of the 2nd segment */
97 u64 sm1; /* scaled slope of the 1st segment */
98 u64 ism1; /* scaled inverse-slope of the 1st segment */
99 u64 dx; /* the x-projection of the 1st segment */
100 u64 dy; /* the y-projection of the 1st segment */
101 u64 sm2; /* scaled slope of the 2nd segment */
102 u64 ism2; /* scaled inverse-slope of the 2nd segment */
521 /* x belongs to the 1st segment */ rtsc_y2x()
527 /* x belongs to the 2nd segment */ rtsc_y2x()
542 /* y belongs to the 1st segment */ rtsc_x2y()
545 /* y belongs to the 2nd segment */ rtsc_x2y()
604 * check if (x, y1) belongs to the 1st segment of rtsc. rtsc_min()
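The runtime service curve above is two linear segments: slope sm1 for the first dx units (gaining dy), then slope sm2 from there on, and rtsc_x2y() evaluates that curve. A stand-alone sketch of the same piecewise evaluation in plain integer arithmetic, with the kernel's fixed-point scaling (the sm*/ism* shifts) omitted and the example numbers invented:

#include <stdio.h>
#include <stdint.h>

/* Two-piece linear service curve: the first segment has slope sm1 for dx
 * units of x (gaining dy), the second segment has slope sm2 afterwards. */
struct runtime_sc { uint64_t x, y, sm1, dx, dy, sm2; };

static uint64_t rtsc_x2y(const struct runtime_sc *rtsc, uint64_t x)
{
    if (x <= rtsc->x + rtsc->dx)
        /* x belongs to the 1st segment */
        return rtsc->y + (x - rtsc->x) * rtsc->sm1;
    /* x belongs to the 2nd segment */
    return rtsc->y + rtsc->dy + (x - rtsc->x - rtsc->dx) * rtsc->sm2;
}

int main(void)
{
    /* Burst at 4 units per tick for 10 ticks, then 1 unit per tick. */
    struct runtime_sc sc = { .x = 0, .y = 0, .sm1 = 4,
                             .dx = 10, .dy = 40, .sm2 = 1 };

    for (uint64_t t = 0; t <= 20; t += 5)
        printf("t=%llu -> service=%llu\n",
               (unsigned long long)t,
               (unsigned long long)rtsc_x2y(&sc, t));
    return 0;
}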
/linux-4.1.27/block/
H A Dblk-merge.c2 * Functions related to segment and merge handling
44 * a segment bio_for_each_segment()
51 * never considered part of another segment, since bio_for_each_segment()
102 /* estimate segment number by bi_vcnt for non-cloned bio */ blk_recount_segments()
299 * This will form the start of a new hw segment. Bump both ll_new_hw_segment()
489 * will have updated segment counts, update sector attempt_merge()
/linux-4.1.27/arch/powerpc/kvm/
H A Dbook3s_64_mmu.c374 dprintk("KVM MMU: Trigger segment fault\n"); kvmppc_mmu_book3s_64_xlate()
426 /* Map the new segment */ kvmppc_mmu_book3s_64_slbmte()
547 * indicating page and segment sizes. kvmppc_mmu_book3s_64_tlbie()
622 * Mark this as a 64k segment if the host is using kvmppc_mmu_book3s_64_esid_to_vsid()
624 * the guest segment page size is >= 64k, kvmppc_mmu_book3s_64_esid_to_vsid()
625 * but not if this segment contains the magic page. kvmppc_mmu_book3s_64_esid_to_vsid()
/linux-4.1.27/fs/
H A Dbinfmt_flat.c77 unsigned long start_code; /* Start of text segment */
78 unsigned long start_data; /* Start of data segment */
79 unsigned long start_brk; /* End of data segment */
80 unsigned long text_len; /* Length of text segment */
366 if (r < text_len) /* In text segment */ calc_reloc()
368 else /* In data segment */ calc_reloc()
386 char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" }; old_reloc() local
400 "(address %p, currently %x) into segment %s\n", old_reloc()
401 r.reloc.offset, ptr, (int)*ptr, segment[r.reloc.type]); old_reloc()
712 * The first is the GOT which resides at the beginning of the data segment load_flat_file()
715 * data segment. These require a little more processing as the entry is load_flat_file()
806 * segment (including bss) but not argv/argc/environ.
889 /* Update data segment pointers for all libraries */ load_flat_binary()
/linux-4.1.27/drivers/misc/cxl/
H A Dmain.c105 pr_err("cxl_alloc_sst: Unable to allocate segment table\n"); cxl_alloc_sst()
117 WARN(1, "Impossible segment table size\n"); cxl_alloc_sst()
/linux-4.1.27/drivers/input/misc/
H A Dyealink.h73 * data segment bits
130 /* LCD, each segment must be driven separately.
