@@ -4054,6 +4054,7 @@ static const char readme_msg[] =
40544054 " x86-tsc: TSC cycle counter\n"
40554055#endif
40564056 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4057+ "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
40574058 " tracing_cpumask\t- Limit which CPUs to trace\n"
40584059 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
40594060 "\t\t\t Remove sub-buffer with rmdir\n"
@@ -5514,35 +5515,15 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
55145515 return 0 ;
55155516}
55165517
5517- static ssize_t
5518- tracing_mark_write ( struct file * filp , const char __user * ubuf ,
5519- size_t cnt , loff_t * fpos )
5518+ static inline int lock_user_pages ( const char __user * ubuf , size_t cnt ,
5519+ struct page * * pages , void * * map_page ,
5520+ int * offset )
55205521{
55215522 unsigned long addr = (unsigned long )ubuf ;
5522- struct trace_array * tr = filp -> private_data ;
5523- struct ring_buffer_event * event ;
5524- struct ring_buffer * buffer ;
5525- struct print_entry * entry ;
5526- unsigned long irq_flags ;
5527- struct page * pages [2 ];
5528- void * map_page [2 ];
55295523 int nr_pages = 1 ;
5530- ssize_t written ;
5531- int offset ;
5532- int size ;
5533- int len ;
55345524 int ret ;
55355525 int i ;
55365526
5537- if (tracing_disabled )
5538- return - EINVAL ;
5539-
5540- if (!(tr -> trace_flags & TRACE_ITER_MARKERS ))
5541- return - EINVAL ;
5542-
5543- if (cnt > TRACE_BUF_SIZE )
5544- cnt = TRACE_BUF_SIZE ;
5545-
55465527 /*
55475528 * Userspace is injecting traces into the kernel trace buffer.
55485529 * We want to be as non intrusive as possible.
@@ -5557,26 +5538,70 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
55575538 * pages directly. We then write the data directly into the
55585539 * ring buffer.
55595540 */
5560- BUILD_BUG_ON (TRACE_BUF_SIZE >= PAGE_SIZE );
55615541
55625542 /* check if we cross pages */
55635543 if ((addr & PAGE_MASK ) != ((addr + cnt ) & PAGE_MASK ))
55645544 nr_pages = 2 ;
55655545
5566- offset = addr & (PAGE_SIZE - 1 );
5546+ * offset = addr & (PAGE_SIZE - 1 );
55675547 addr &= PAGE_MASK ;
55685548
55695549 ret = get_user_pages_fast (addr , nr_pages , 0 , pages );
55705550 if (ret < nr_pages ) {
55715551 while (-- ret >= 0 )
55725552 put_page (pages [ret ]);
5573- written = - EFAULT ;
5574- goto out ;
5553+ return - EFAULT ;
55755554 }
55765555
55775556 for (i = 0 ; i < nr_pages ; i ++ )
55785557 map_page [i ] = kmap_atomic (pages [i ]);
55795558
5559+ return nr_pages ;
5560+ }
5561+
/*
 * Undo lock_user_pages(): drop the kernel mappings and unpin the user
 * pages.  Mappings are released in reverse order of creation because
 * kmap_atomic() slots are stack-ordered.
 */
static inline void unlock_user_pages(struct page **pages,
				     void **map_page, int nr_pages)
{
	while (nr_pages-- > 0) {
		kunmap_atomic(map_page[nr_pages]);
		put_page(pages[nr_pages]);
	}
}
5572+
/*
 * NOTE(review): refactored trace_marker write handler — the page
 * pinning/mapping was factored out into lock_user_pages() /
 * unlock_user_pages().  The diff omits unchanged lines 5608-5635 of
 * the new file (event reservation, payload copy, newline handling and
 * commit) between the two hunks below, so only the prologue and
 * epilogue are visible here; claims about the middle cannot be
 * verified from this view.
 */
5573+ static ssize_t
5574+ tracing_mark_write (struct file * filp , const char __user * ubuf ,
5575+ size_t cnt , loff_t * fpos )
5576+ {
5577+ struct trace_array * tr = filp -> private_data ;
5578+ struct ring_buffer_event * event ;
5579+ struct ring_buffer * buffer ;
5580+ struct print_entry * entry ;
5581+ unsigned long irq_flags ;
5582+ struct page * pages [2 ];
5583+ void * map_page [2 ];
5584+ int nr_pages = 1 ;
5585+ ssize_t written ;
5586+ int offset ;
5587+ int size ;
5588+ int len ;
5589+
/* Refuse writes while tracing is off or markers are disabled. */
5590+ if (tracing_disabled )
5591+ return - EINVAL ;
5592+
5593+ if (!(tr -> trace_flags & TRACE_ITER_MARKERS ))
5594+ return - EINVAL ;
5595+
/* Marker text is silently truncated to the trace buffer size. */
5596+ if (cnt > TRACE_BUF_SIZE )
5597+ cnt = TRACE_BUF_SIZE ;
5598+
5599+ BUILD_BUG_ON (TRACE_BUF_SIZE >= PAGE_SIZE );
5600+
/* lock_user_pages() returns the pinned page count or a -errno. */
5601+ nr_pages = lock_user_pages (ubuf , cnt , pages , map_page , & offset );
5602+ if (nr_pages < 0 )
5603+ return nr_pages ;
5604+
5605+ local_save_flags (irq_flags );
5606+ size = sizeof (* entry ) + cnt + 2 ; /* possible \n added */
5607+ buffer = tr -> trace_buffer .buffer ;
/* -- hunk boundary: new-file lines 5608-5635 are not shown here -- */
@@ -5611,11 +5636,79 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
5636 * fpos += written ;
5637
5638 out_unlock :
5614- for (i = nr_pages - 1 ; i >= 0 ; i -- ) {
5615- kunmap_atomic (map_page [i ]);
5616- put_page (pages [i ]);
/* Epilogue now delegates the unmap/unpin to the shared helper. */
5639+ unlock_user_pages (pages , map_page , nr_pages );
5640+
5641+ return written ;
5642+ }
5643+
5644+ /* Limit it for now to 3K (including tag) */
5645+ #define RAW_DATA_MAX_SIZE (1024*3)
5646+
5647+ static ssize_t
5648+ tracing_mark_raw_write (struct file * filp , const char __user * ubuf ,
5649+ size_t cnt , loff_t * fpos )
5650+ {
5651+ struct trace_array * tr = filp -> private_data ;
5652+ struct ring_buffer_event * event ;
5653+ struct ring_buffer * buffer ;
5654+ struct raw_data_entry * entry ;
5655+ unsigned long irq_flags ;
5656+ struct page * pages [2 ];
5657+ void * map_page [2 ];
5658+ int nr_pages = 1 ;
5659+ ssize_t written ;
5660+ int offset ;
5661+ int size ;
5662+ int len ;
5663+
5664+ if (tracing_disabled )
5665+ return - EINVAL ;
5666+
5667+ if (!(tr -> trace_flags & TRACE_ITER_MARKERS ))
5668+ return - EINVAL ;
5669+
5670+ /* The marker must at least have a tag id */
5671+ if (cnt < sizeof (unsigned int ) || cnt > RAW_DATA_MAX_SIZE )
5672+ return - EINVAL ;
5673+
5674+ if (cnt > TRACE_BUF_SIZE )
5675+ cnt = TRACE_BUF_SIZE ;
5676+
5677+ BUILD_BUG_ON (TRACE_BUF_SIZE >= PAGE_SIZE );
5678+
5679+ nr_pages = lock_user_pages (ubuf , cnt , pages , map_page , & offset );
5680+ if (nr_pages < 0 )
5681+ return nr_pages ;
5682+
5683+ local_save_flags (irq_flags );
5684+ size = sizeof (* entry ) + cnt ;
5685+ buffer = tr -> trace_buffer .buffer ;
5686+ event = trace_buffer_lock_reserve (buffer , TRACE_RAW_DATA , size ,
5687+ irq_flags , preempt_count ());
5688+ if (!event ) {
5689+ /* Ring buffer disabled, return as if not open for write */
5690+ written = - EBADF ;
5691+ goto out_unlock ;
56175692 }
5618- out :
5693+
5694+ entry = ring_buffer_event_data (event );
5695+
5696+ if (nr_pages == 2 ) {
5697+ len = PAGE_SIZE - offset ;
5698+ memcpy (& entry -> id , map_page [0 ] + offset , len );
5699+ memcpy (((char * )& entry -> id ) + len , map_page [1 ], cnt - len );
5700+ } else
5701+ memcpy (& entry -> id , map_page [0 ] + offset , cnt );
5702+
5703+ __buffer_unlock_commit (buffer , event );
5704+
5705+ written = cnt ;
5706+
5707+ * fpos += written ;
5708+
5709+ out_unlock :
5710+ unlock_user_pages (pages , map_page , nr_pages );
5711+
56195712 return written ;
56205713}
56215714
@@ -5945,6 +6038,13 @@ static const struct file_operations tracing_mark_fops = {
59456038 .release = tracing_release_generic_tr ,
59466039};
59476040
6041+ static const struct file_operations tracing_mark_raw_fops = {
6042+ .open = tracing_open_generic_tr ,
6043+ .write = tracing_mark_raw_write ,
6044+ .llseek = generic_file_llseek ,
6045+ .release = tracing_release_generic_tr ,
6046+ };
6047+
59486048static const struct file_operations trace_clock_fops = {
59496049 .open = tracing_clock_open ,
59506050 .read = seq_read ,
@@ -7214,6 +7314,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
72147314 trace_create_file ("trace_marker" , 0220 , d_tracer ,
72157315 tr , & tracing_mark_fops );
72167316
7317+ trace_create_file ("trace_marker_raw" , 0220 , d_tracer ,
7318+ tr , & tracing_mark_raw_fops );
7319+
72177320 trace_create_file ("trace_clock" , 0644 , d_tracer , tr ,
72187321 & trace_clock_fops );
72197322
0 commit comments