sound/core/seq/seq_instr.c
[ALSA] schedule_timeout() fix for core/seq/seq_instr.c
/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");

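/*
 * Serialize access to the ops chain of an instrument list.  Lists created
 * without SNDRV_SEQ_INSTR_FLG_DIRECT take the ops spinlock (with the IRQ
 * flags kept in the list itself); direct lists take the ops mutex instead,
 * so their ops callbacks may sleep.
 */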
static void snd_instr_lock_ops(struct snd_seq_kinstr_list *list)
{
        if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
                spin_lock_irqsave(&list->ops_lock, list->ops_flags);
        } else {
                mutex_lock(&list->ops_mutex);
        }
}

static void snd_instr_unlock_ops(struct snd_seq_kinstr_list *list)
{
        if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
                spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
        } else {
                mutex_unlock(&list->ops_mutex);
        }
}

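/*
 * Allocate one kernel instrument; add_len extra bytes are reserved behind
 * struct snd_seq_kinstr for the type-specific data, and GFP_ATOMIC is used
 * when the caller is in atomic context.
 */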
static struct snd_seq_kinstr *snd_seq_instr_new(int add_len, int atomic)
{
        struct snd_seq_kinstr *instr;

        instr = kzalloc(sizeof(struct snd_seq_kinstr) + add_len,
                        atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (instr == NULL)
                return NULL;
        instr->add_len = add_len;
        return instr;
}

static int snd_seq_instr_free(struct snd_seq_kinstr *instr, int atomic)
{
        int result = 0;

        if (instr == NULL)
                return -EINVAL;
        if (instr->ops && instr->ops->remove)
                result = instr->ops->remove(instr->ops->private_data, instr, 1);
        if (!result)
                kfree(instr);
        return result;
}

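/* Create an empty instrument list with initialized locks and no owner. */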
struct snd_seq_kinstr_list *snd_seq_instr_list_new(void)
{
        struct snd_seq_kinstr_list *list;

        list = kzalloc(sizeof(struct snd_seq_kinstr_list), GFP_KERNEL);
        if (list == NULL)
                return NULL;
        spin_lock_init(&list->lock);
        spin_lock_init(&list->ops_lock);
        mutex_init(&list->ops_mutex);
        list->owner = -1;
        return list;
}

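/*
 * Destroy a whole instrument list.  Every hash chain is drained; an
 * instrument still in use is waited for (1-jiffy sleeps) until its use
 * count drops to zero, then it is freed, and the cluster chains are
 * released afterwards.
 */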
void snd_seq_instr_list_free(struct snd_seq_kinstr_list **list_ptr)
{
        struct snd_seq_kinstr_list *list;
        struct snd_seq_kinstr *instr;
        struct snd_seq_kcluster *cluster;
        int idx;
        unsigned long flags;

        if (list_ptr == NULL)
                return;
        list = *list_ptr;
        *list_ptr = NULL;
        if (list == NULL)
                return;

        for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
                while ((instr = list->hash[idx]) != NULL) {
                        list->hash[idx] = instr->next;
                        list->count--;
                        spin_lock_irqsave(&list->lock, flags);
                        while (instr->use) {
                                spin_unlock_irqrestore(&list->lock, flags);
                                schedule_timeout_uninterruptible(1);
                                spin_lock_irqsave(&list->lock, flags);
                        }
                        spin_unlock_irqrestore(&list->lock, flags);
                        if (snd_seq_instr_free(instr, 0) < 0)
                                snd_printk(KERN_WARNING "instrument free problem\n");
                }
                while ((cluster = list->chash[idx]) != NULL) {
                        list->chash[idx] = cluster->next;
                        list->ccount--;
                        kfree(cluster);
                }
        }
        kfree(list);
}

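/*
 * Match an instrument against a free request: 0 means "remove it",
 * 1 means "keep it".  The top byte of instr.std identifies the owning
 * client of a private instrument.
 */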
static int instr_free_compare(struct snd_seq_kinstr *instr,
                              struct snd_seq_instr_header *ifree,
                              unsigned int client)
{
        switch (ifree->cmd) {
        case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
                /* all, except private for other clients */
                if ((instr->instr.std & 0xff000000) == 0)
                        return 0;
                if (((instr->instr.std >> 24) & 0xff) == client)
                        return 0;
                return 1;
        case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
                /* all my private instruments */
                if ((instr->instr.std & 0xff000000) == 0)
                        return 1;
                if (((instr->instr.std >> 24) & 0xff) == client)
                        return 0;
                return 1;
        case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
                /* all instruments in the given cluster (global or my private) */
                if ((instr->instr.std & 0xff000000) == 0) {
                        if (instr->instr.cluster == ifree->id.cluster)
                                return 0;
                        return 1;
                }
                if (((instr->instr.std >> 24) & 0xff) == client) {
                        if (instr->instr.cluster == ifree->id.cluster)
                                return 0;
                }
                return 1;
        }
        return 1;
}

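/*
 * Conditionally free instruments matching the request in ifree.  Matching
 * entries are unlinked under the list spinlock onto a temporary list and
 * then freed outside the spinlock once their use counts have dropped.
 */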
int snd_seq_instr_list_free_cond(struct snd_seq_kinstr_list *list,
                                 struct snd_seq_instr_header *ifree,
                                 int client,
                                 int atomic)
{
        struct snd_seq_kinstr *instr, *prev, *next, *flist;
        int idx;
        unsigned long flags;

        snd_instr_lock_ops(list);
        for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
                spin_lock_irqsave(&list->lock, flags);
                instr = list->hash[idx];
                prev = flist = NULL;
                while (instr) {
                        while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
                                prev = instr;
                                instr = instr->next;
                        }
                        if (instr == NULL)
                                continue;
                        if (instr->ops && instr->ops->notify)
                                instr->ops->notify(instr->ops->private_data, instr,
                                                   SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
                        next = instr->next;
                        if (prev == NULL) {
                                list->hash[idx] = next;
                        } else {
                                prev->next = next;
                        }
                        list->count--;
                        instr->next = flist;
                        flist = instr;
                        instr = next;
                }
                spin_unlock_irqrestore(&list->lock, flags);
                while (flist) {
                        instr = flist;
                        flist = instr->next;
                        while (instr->use) {
                                schedule_timeout_uninterruptible(1);
                                barrier();
                        }
                        if (snd_seq_instr_free(instr, atomic) < 0)
                                snd_printk(KERN_WARNING "instrument free problem\n");
                }
        }
        snd_instr_unlock_ops(list);
        return 0;
}

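/* Fold the bank and program numbers into an index into the hash table. */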
static int compute_hash_instr_key(struct snd_seq_instr *instr)
{
        int result;

        result = instr->bank | (instr->prg << 16);
        result += result >> 24;
        result += result >> 16;
        result += result >> 8;
        return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}

#if 0
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
        int result;

        result = cluster;
        result += result >> 24;
        result += result >> 16;
        result += result >> 8;
        return result & (SNDRV_SEQ_INSTR_HASH_SIZE - 1);
}
#endif

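/*
 * Compare two instrument identifiers.  With exact set, cluster, bank,
 * program and the standard/client bits must all match.  Otherwise a zero
 * cluster in i2 acts as a wildcard, the client byte is checked only when
 * i2 carries one (else i1 must provide every std bit requested by i2),
 * and bank/program must still match.
 */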
static int compare_instr(struct snd_seq_instr *i1, struct snd_seq_instr *i2, int exact)
{
        if (exact) {
                if (i1->cluster != i2->cluster ||
                    i1->bank != i2->bank ||
                    i1->prg != i2->prg)
                        return 1;
                if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
                        return 1;
                if (!(i1->std & i2->std))
                        return 1;
                return 0;
        } else {
                unsigned int client_check;

                if (i2->cluster && i1->cluster != i2->cluster)
                        return 1;
                client_check = i2->std & 0xff000000;
                if (client_check) {
                        if ((i1->std & 0xff000000) != client_check)
                                return 1;
                } else {
                        if ((i1->std & i2->std) != i2->std)
                                return 1;
                }
                return i1->bank != i2->bank || i1->prg != i2->prg;
        }
}

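/*
 * Look up an instrument and take a use-count reference on it.  Alias
 * entries are followed (up to 10 levels) when follow_alias is set.  The
 * reference must be dropped with snd_seq_instr_free_use().
 */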
struct snd_seq_kinstr *snd_seq_instr_find(struct snd_seq_kinstr_list *list,
                                          struct snd_seq_instr *instr,
                                          int exact,
                                          int follow_alias)
{
        unsigned long flags;
        int depth = 0;
        struct snd_seq_kinstr *result;

        if (list == NULL || instr == NULL)
                return NULL;
        spin_lock_irqsave(&list->lock, flags);
      __again:
        result = list->hash[compute_hash_instr_key(instr)];
        while (result) {
                if (!compare_instr(&result->instr, instr, exact)) {
                        if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
                                instr = (struct snd_seq_instr *)KINSTR_DATA(result);
                                if (++depth > 10)
                                        goto __not_found;
                                goto __again;
                        }
                        result->use++;
                        spin_unlock_irqrestore(&list->lock, flags);
                        return result;
                }
                result = result->next;
        }
      __not_found:
        spin_unlock_irqrestore(&list->lock, flags);
        return NULL;
}

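/*
 * Minimal lookup sketch (kept disabled, like the other #if 0 blocks in this
 * file): the point is the find/free_use pairing.  The function name and the
 * identifier values below are purely illustrative.
 */
#if 0
static void example_instr_lookup(struct snd_seq_kinstr_list *list)
{
        struct snd_seq_instr id;
        struct snd_seq_kinstr *kinstr;

        memset(&id, 0, sizeof(id));
        id.std = 1;             /* illustrative standard-bank bit */
        id.bank = 0;
        id.prg = 0;

        kinstr = snd_seq_instr_find(list, &id, 0, 1);
        if (kinstr) {
                /* ... work with KINSTR_DATA(kinstr) ... */
                snd_seq_instr_free_use(list, kinstr);   /* drop the reference */
        }
}
#endif
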
void snd_seq_instr_free_use(struct snd_seq_kinstr_list *list,
                            struct snd_seq_kinstr *instr)
{
        unsigned long flags;

        if (list == NULL || instr == NULL)
                return;
        spin_lock_irqsave(&list->lock, flags);
        if (instr->use <= 0) {
                snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n",
                           instr->use, instr->name);
        } else {
                instr->use--;
        }
        spin_unlock_irqrestore(&list->lock, flags);
}

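/* Walk the ops chain and return the entry whose instr_type string matches. */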
static struct snd_seq_kinstr_ops *instr_ops(struct snd_seq_kinstr_ops *ops,
                                            char *instr_type)
{
        while (ops) {
                if (!strcmp(ops->instr_type, instr_type))
                        return ops;
                ops = ops->next;
        }
        return NULL;
}

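/*
 * Send a RESULT event back to the source of an instrument request,
 * carrying the original event type and the error code.
 */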
static int instr_result(struct snd_seq_event *ev,
                        int type, int result,
                        int atomic)
{
        struct snd_seq_event sev;

        memset(&sev, 0, sizeof(sev));
        sev.type = SNDRV_SEQ_EVENT_RESULT;
        sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
                    SNDRV_SEQ_PRIORITY_NORMAL;
        sev.source = ev->dest;
        sev.dest = ev->source;
        sev.data.result.event = type;
        sev.data.result.result = result;
#if 0
        printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
               type, result,
               sev.queue,
               sev.source.client, sev.source.port,
               sev.dest.client, sev.dest.port);
#endif
        return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}

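/*
 * INSTR_BEGIN/INSTR_END form a simple ownership handshake: BEGIN records
 * the requesting client as the list owner (or answers -EBUSY while another
 * client holds it), and END releases the ownership again.
 */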
static int instr_begin(struct snd_seq_kinstr_ops *ops,
                       struct snd_seq_kinstr_list *list,
                       struct snd_seq_event *ev,
                       int atomic, int hop)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        if (list->owner >= 0 && list->owner != ev->source.client) {
                spin_unlock_irqrestore(&list->lock, flags);
                return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
        }
        list->owner = ev->source.client;
        spin_unlock_irqrestore(&list->lock, flags);
        return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}

static int instr_end(struct snd_seq_kinstr_ops *ops,
                     struct snd_seq_kinstr_list *list,
                     struct snd_seq_event *ev,
                     int atomic, int hop)
{
        unsigned long flags;

        /* TODO: timeout handling */
        spin_lock_irqsave(&list->lock, flags);
        if (list->owner == ev->source.client) {
                list->owner = -1;
                spin_unlock_irqrestore(&list->lock, flags);
                return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
        }
        spin_unlock_irqrestore(&list->lock, flags);
        return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}

static int instr_info(struct snd_seq_kinstr_ops *ops,
                      struct snd_seq_kinstr_list *list,
                      struct snd_seq_event *ev,
                      int atomic, int hop)
{
        return -ENXIO;
}

static int instr_format_info(struct snd_seq_kinstr_ops *ops,
                             struct snd_seq_kinstr_list *list,
                             struct snd_seq_event *ev,
                             int atomic, int hop)
{
        return -ENXIO;
}

static int instr_reset(struct snd_seq_kinstr_ops *ops,
                       struct snd_seq_kinstr_list *list,
                       struct snd_seq_event *ev,
                       int atomic, int hop)
{
        return -ENXIO;
}

static int instr_status(struct snd_seq_kinstr_ops *ops,
                        struct snd_seq_kinstr_list *list,
                        struct snd_seq_event *ev,
                        int atomic, int hop)
{
        return -ENXIO;
}

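/*
 * INSTR_PUT: copy the instrument header from user space, refuse duplicates,
 * select the ops entry matching the declared data format, let it parse the
 * payload following the header, and link the new instrument into the hash
 * table.  The outcome is reported back through instr_result().
 */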
static int instr_put(struct snd_seq_kinstr_ops *ops,
                     struct snd_seq_kinstr_list *list,
                     struct snd_seq_event *ev,
                     int atomic, int hop)
{
        unsigned long flags;
        struct snd_seq_instr_header put;
        struct snd_seq_kinstr *instr;
        int result = -EINVAL, len, key;

        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
                goto __return;

        if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
                goto __return;
        if (copy_from_user(&put, (void __user *)ev->data.ext.ptr,
                           sizeof(struct snd_seq_instr_header))) {
                result = -EFAULT;
                goto __return;
        }
        snd_instr_lock_ops(list);
        if (put.id.instr.std & 0xff000000) {    /* private instrument */
                put.id.instr.std &= 0x00ffffff;
                put.id.instr.std |= (unsigned int)ev->source.client << 24;
        }
        if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
                snd_seq_instr_free_use(list, instr);
                snd_instr_unlock_ops(list);
                result = -EBUSY;
                goto __return;
        }
        ops = instr_ops(ops, put.data.data.format);
        if (ops == NULL) {
                snd_instr_unlock_ops(list);
                goto __return;
        }
        len = ops->add_len;
        if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
                len = sizeof(struct snd_seq_instr);
        instr = snd_seq_instr_new(len, atomic);
        if (instr == NULL) {
                snd_instr_unlock_ops(list);
                result = -ENOMEM;
                goto __return;
        }
        instr->ops = ops;
        instr->instr = put.id.instr;
        strlcpy(instr->name, put.data.name, sizeof(instr->name));
        instr->type = put.data.type;
        if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
                result = ops->put(ops->private_data,
                                  instr,
                                  (void __user *)ev->data.ext.ptr + sizeof(struct snd_seq_instr_header),
                                  ev->data.ext.len - sizeof(struct snd_seq_instr_header),
                                  atomic,
                                  put.cmd);
                if (result < 0) {
                        snd_seq_instr_free(instr, atomic);
                        snd_instr_unlock_ops(list);
                        goto __return;
                }
        }
        key = compute_hash_instr_key(&instr->instr);
        spin_lock_irqsave(&list->lock, flags);
        instr->next = list->hash[key];
        list->hash[key] = instr;
        list->count++;
        spin_unlock_irqrestore(&list->lock, flags);
        snd_instr_unlock_ops(list);
        result = 0;
      __return:
        instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
        return result;
}

static int instr_get(struct snd_seq_kinstr_ops *ops,
                     struct snd_seq_kinstr_list *list,
                     struct snd_seq_event *ev,
                     int atomic, int hop)
{
        return -ENXIO;
}

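/*
 * INSTR_FREE: remove instruments according to the command in the header.
 * ALL, PRIVATE and CLUSTER requests are handled by
 * snd_seq_instr_list_free_cond(); a SINGLE request unlinks the exact match,
 * notifies its ops owner and waits for the use count to drain before the
 * instrument is freed.
 */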
static int instr_free(struct snd_seq_kinstr_ops *ops,
                      struct snd_seq_kinstr_list *list,
                      struct snd_seq_event *ev,
                      int atomic, int hop)
{
        struct snd_seq_instr_header ifree;
        struct snd_seq_kinstr *instr, *prev;
        int result = -EINVAL;
        unsigned long flags;
        unsigned int hash;

        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
                goto __return;

        if (ev->data.ext.len < sizeof(struct snd_seq_instr_header))
                goto __return;
        if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr,
                           sizeof(struct snd_seq_instr_header))) {
                result = -EFAULT;
                goto __return;
        }
        if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
            ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
            ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
                result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
                goto __return;
        }
        if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
                if (ifree.id.instr.std & 0xff000000) {
                        ifree.id.instr.std &= 0x00ffffff;
                        ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
                }
                hash = compute_hash_instr_key(&ifree.id.instr);
                snd_instr_lock_ops(list);
                spin_lock_irqsave(&list->lock, flags);
                instr = list->hash[hash];
                prev = NULL;
                while (instr) {
                        if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
                                goto __free_single;
                        prev = instr;
                        instr = instr->next;
                }
                result = -ENOENT;
                spin_unlock_irqrestore(&list->lock, flags);
                snd_instr_unlock_ops(list);
                goto __return;

              __free_single:
                if (prev) {
                        prev->next = instr->next;
                } else {
                        list->hash[hash] = instr->next;
                }
                if (instr->ops && instr->ops->notify)
                        instr->ops->notify(instr->ops->private_data, instr,
                                           SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
                while (instr->use) {
                        spin_unlock_irqrestore(&list->lock, flags);
                        schedule_timeout_uninterruptible(1);
                        spin_lock_irqsave(&list->lock, flags);
                }
                spin_unlock_irqrestore(&list->lock, flags);
                result = snd_seq_instr_free(instr, atomic);
                snd_instr_unlock_ops(list);
                goto __return;
        }

      __return:
        instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
        return result;
}

static int instr_list(struct snd_seq_kinstr_ops *ops,
                      struct snd_seq_kinstr_list *list,
                      struct snd_seq_event *ev,
                      int atomic, int hop)
{
        return -ENXIO;
}

static int instr_cluster(struct snd_seq_kinstr_ops *ops,
                         struct snd_seq_kinstr_list *list,
                         struct snd_seq_event *ev,
                         int atomic, int hop)
{
        return -ENXIO;
}

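/*
 * Dispatcher for sequencer instrument events.  BEGIN/END are honoured only
 * as direct events, lists flagged SNDRV_SEQ_INSTR_FLG_DIRECT reject queued
 * events altogether, and all remaining types are routed to their handlers
 * above.
 */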
int snd_seq_instr_event(struct snd_seq_kinstr_ops *ops,
                        struct snd_seq_kinstr_list *list,
                        struct snd_seq_event *ev,
                        int client,
                        int atomic,
                        int hop)
{
        int direct = 0;

        snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
        if (snd_seq_ev_is_direct(ev)) {
                direct = 1;
                switch (ev->type) {
                case SNDRV_SEQ_EVENT_INSTR_BEGIN:
                        return instr_begin(ops, list, ev, atomic, hop);
                case SNDRV_SEQ_EVENT_INSTR_END:
                        return instr_end(ops, list, ev, atomic, hop);
                }
        }
        if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
                return -EINVAL;
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_INSTR_INFO:
                return instr_info(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_FINFO:
                return instr_format_info(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_RESET:
                return instr_reset(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_STATUS:
                return instr_status(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_PUT:
                return instr_put(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_GET:
                return instr_get(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_FREE:
                return instr_free(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_LIST:
                return instr_list(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
                return instr_cluster(ops, list, ev, atomic, hop);
        }
        return -EINVAL;
}
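
/*
 * Minimal dispatch sketch (kept disabled, like the other #if 0 blocks).  A
 * synth driver would forward instrument events from its kernel-client event
 * input callback; my_ops, my_list and my_client are hypothetical driver
 * state assumed to be set up elsewhere, and the callback signature follows
 * the kernel sequencer client interface of this kernel generation.
 */
#if 0
static struct snd_seq_kinstr_ops *my_ops;       /* driver ops chain */
static struct snd_seq_kinstr_list *my_list;     /* from snd_seq_instr_list_new() */
static int my_client;                           /* kernel client number */

static int my_event_input(struct snd_seq_event *ev, int direct,
                          void *private_data, int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_INSTR_BEGIN:
        case SNDRV_SEQ_EVENT_INSTR_END:
        case SNDRV_SEQ_EVENT_INSTR_PUT:
        case SNDRV_SEQ_EVENT_INSTR_GET:
        case SNDRV_SEQ_EVENT_INSTR_FREE:
                return snd_seq_instr_event(my_ops, my_list, ev,
                                           my_client, atomic, hop);
        default:
                return 0;       /* other events handled elsewhere */
        }
}
#endif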

/*
 *  Init part
 */

static int __init alsa_seq_instr_init(void)
{
        return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)

EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);