From 136681b8bac6947eb403c669da20b0bdd4310ea0 Mon Sep 17 00:00:00 2001 From: Han Gao Date: Thu, 19 Sep 2024 07:34:39 +0800 Subject: [PATCH] sync: xuantie: vpu-vc8000e SDK v2.0.2 code Signed-off-by: Han Gao Signed-off-by: Han Gao --- drivers/soc/xuantie/Kconfig | 1 + drivers/soc/xuantie/Makefile | 1 + .../soc/xuantie/vpu-vc8000e-kernel/Android.mk | 10 + .../xuantie/vpu-vc8000e-kernel/Android.mk.def | 7 + .../soc/xuantie/vpu-vc8000e-kernel/COPYING | 674 ++ .../soc/xuantie/vpu-vc8000e-kernel/Kconfig | 3 + .../soc/xuantie/vpu-vc8000e-kernel/Makefile | 91 + .../soc/xuantie/vpu-vc8000e-kernel/README.md | 0 .../vpu-vc8000e-kernel/addons/ko/insmod.sh | 5 + .../vpu-vc8000e-kernel/addons/ko/rmmod.sh | 3 + .../linux/kernel_module/Android.mk | 28 + .../linux/kernel_module/Makefile | 211 + .../linux/kernel_module/README | 42 + .../linux/kernel_module/bidirect_list.c | 222 + .../linux/kernel_module/bidirect_list.h | 116 + .../linux/kernel_module/driver_load.sh | 61 + .../linux/kernel_module/hantro_mmu.c | 1911 ++++++ .../linux/kernel_module/hantrommu.h | 155 + .../linux/kernel_module/vc8000_axife.c | 98 + .../linux/kernel_module/vc8000_axife.h | 77 + .../linux/kernel_module/vc8000_devfreq.h | 54 + .../linux/kernel_module/vc8000_driver.c | 102 + .../linux/kernel_module/vc8000_driver.h | 349 + .../kernel_module/vc8000_normal_driver.c | 1459 +++++ .../linux/kernel_module/vc8000_vcmd_driver.c | 5594 +++++++++++++++++ .../linux/kernel_module/vcmdregisterenum.h | 157 + .../linux/kernel_module/vcmdregistertable.h | 157 + .../linux/kernel_module/vcmdswhwregisters.c | 180 + .../linux/kernel_module/vcmdswhwregisters.h | 244 + .../linux/kernel_module/venc_trace_point.h | 36 + include/trace/events/venc_trace_point.h | 36 + 31 files changed, 12084 insertions(+) create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk.def create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/COPYING create mode 100644 
drivers/soc/xuantie/vpu-vc8000e-kernel/Kconfig create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/Makefile create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/README.md create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/insmod.sh create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/rmmod.sh create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Android.mk create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Makefile create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/README create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.c create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.h create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/driver_load.sh create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantro_mmu.c create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantrommu.h create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.c create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.h create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_devfreq.h create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.c create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.h create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_normal_driver.c create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_vcmd_driver.c create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregisterenum.h create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregistertable.h create mode 100644 
drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.c create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.h create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/venc_trace_point.h create mode 100644 include/trace/events/venc_trace_point.h diff --git a/drivers/soc/xuantie/Kconfig b/drivers/soc/xuantie/Kconfig index 3dcf023d43b8b6..82ddcf3895d7cf 100644 --- a/drivers/soc/xuantie/Kconfig +++ b/drivers/soc/xuantie/Kconfig @@ -30,4 +30,5 @@ config TH1520_IOPMP source "drivers/soc/xuantie/nna/Kconfig" source "drivers/soc/xuantie/video_memory/Kconfig" source "drivers/soc/xuantie/vpu-vc8000d-kernel/Kconfig" +source "drivers/soc/xuantie/vpu-vc8000e-kernel/Kconfig" endmenu diff --git a/drivers/soc/xuantie/Makefile b/drivers/soc/xuantie/Makefile index 230c1753705f8b..11460835e6b148 100644 --- a/drivers/soc/xuantie/Makefile +++ b/drivers/soc/xuantie/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_TH1520_IOPMP) += th1520-iopmp.o obj-y += nna/ obj-$(CONFIG_VIDEO_MEMORY) += video_memory/driver/ obj-$(CONFIG_VIDEO_VC8000D) += vpu-vc8000d-kernel/linux/subsys_driver/ vpu-vc8000d-kernel/linux/memalloc/ +obj-$(CONFIG_VIDEO_VC8000E) += vpu-vc8000e-kernel/linux/kernel_module/ diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk b/drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk new file mode 100644 index 00000000000000..906d14d0cbec1a --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk @@ -0,0 +1,10 @@ +## + # Copyright (C) 2021 Alibaba Group Holding Limited +## + +LOCAL_PATH := $(call my-dir) + +include $(LOCAL_PATH)/Android.mk.def + +include $(LOCAL_PATH)/linux/kernel_module/Android.mk + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk.def b/drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk.def new file mode 100644 index 00000000000000..ee39557ceccaab --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/Android.mk.def @@ -0,0 +1,7 @@ +-include 
device/thead/common/build/common.mk.def +-include vendor/thead/build/make/common.mk.def + + +BUILD_VENDOR_TEST = 1 +PLATFORM_VENDOR = 1 + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/COPYING b/drivers/soc/xuantie/vpu-vc8000e-kernel/COPYING new file mode 100644 index 00000000000000..94a9ed024d3859 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/Kconfig b/drivers/soc/xuantie/vpu-vc8000e-kernel/Kconfig new file mode 100644 index 00000000000000..0ac10b0e99679b --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/Kconfig @@ -0,0 +1,3 @@ +config VIDEO_VC8000E + tristate "VC8000E support" + default m diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/Makefile b/drivers/soc/xuantie/vpu-vc8000e-kernel/Makefile new file mode 100644 index 00000000000000..b80a2a91cb9ef5 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/Makefile @@ -0,0 +1,91 @@ +## + # Copyright (C) 2020 Alibaba Group Holding Limited +## +ifneq ($(wildcard ../.param),) + include ../.param +endif + +#CONFIG_DEBUG_MODE=1 +CONFIG_OUT_ENV=hwlinux + +CONFIG_BUILD_DRV_EXTRA_PARAM:="" + +DIR_TARGET_BASE=bsp/venc +DIR_TARGET_KO =bsp/venc/ko +LINUX_DIR ?= $(OUT)/obj/KERNEL_OBJ/ + +MODULE_NAME=VENC +BUILD_LOG_START="\033[47;30m>>> $(MODULE_NAME) $@ begin\033[0m" +BUILD_LOG_END ="\033[47;30m<<< $(MODULE_NAME) $@ end\033[0m" + +# +# Do a parallel build with multiple jobs, based on the number of CPUs online +# in this system: 'make -j8' on a 8-CPU system, etc. +# +# (To override it, run 'make JOBS=1' and similar.) 
+# + +ifeq ($(JOBS),) + JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null) + ifeq ($(JOBS),) + JOBS := 1 + endif +endif + +all: info driver install_local_output +.PHONY: info driver install_local_output install_addons install_prepare clean_driver clean_output clean + +info: + @echo $(BUILD_LOG_START) + @echo " ====== Build Info from repo project ======" + @echo " BUILDROOT_DIR="$(BUILDROOT_DIR) + @echo " CROSS_COMPILE="$(CROSS_COMPILE) + @echo " LINUX_DIR="$(LINUX_DIR) + @echo " ARCH="$(ARCH) + @echo " BOARD_NAME="$(BOARD_NAME) + @echo " KERNEL_ID="$(KERNELVERSION) + @echo " KERNEL_DIR="$(LINUX_DIR) + @echo " INSTALL_DIR_ROOTFS="$(INSTALL_DIR_ROOTFS) + @echo " INSTALL_DIR_SDK="$(INSTALL_DIR_SDK) + @echo " ====== Build configuration by settings ======" +# @echo " CONFIG_DEBUG_MODE="$(CONFIG_DEBUG_MODE) + @echo " CONFIG_OUT_ENV="$(CONFIG_OUT_ENV) + @echo " JOBS="$(JOBS) + @echo $(BUILD_LOG_END) + +driver: + @echo $(BUILD_LOG_START) + make -C linux/kernel_module KDIR=$(LINUX_DIR) ARCH=$(ARCH) + @echo $(BUILD_LOG_END) + +clean_driver: + @echo $(BUILD_LOG_START) + make -C linux/kernel_module KDIR=$(LINUX_DIR) clean + @echo $(BUILD_LOG_END) + +install_prepare: + mkdir -p ./output/rootfs/$(DIR_TARGET_KO) + +install_addons: install_prepare + @if [ -d addons/ko ]; then \ + cp -rf addons/ko/* ./output/rootfs/$(DIR_TARGET_KO); \ + fi + +install_local_output: install_addons install_prepare driver + @echo $(BUILD_LOG_START) + find ./linux -name "*.ko" | xargs -i cp -f {} ./output/rootfs/$(DIR_TARGET_KO) + cp -f ./linux/kernel_module/driver_load.sh ./output/rootfs/$(DIR_TARGET_KO) + chmod +x ./output/rootfs/$(DIR_TARGET_KO)/*.sh + echo "vc8000" > ./output/rootfs/$(DIR_TARGET_KO)/vc8000e.conf + @if [ `command -v tree` != "" ]; then \ + tree ./output/rootfs; \ + fi + @echo $(BUILD_LOG_END) + +clean_output: + @echo $(BUILD_LOG_START) + rm -rf ./output + @echo $(BUILD_LOG_END) + +clean: clean_output clean_driver + diff --git 
a/drivers/soc/xuantie/vpu-vc8000e-kernel/README.md b/drivers/soc/xuantie/vpu-vc8000e-kernel/README.md new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/insmod.sh b/drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/insmod.sh new file mode 100755 index 00000000000000..cd6783296267ac --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/insmod.sh @@ -0,0 +1,5 @@ +#!/bin/sh +KERNEL_VER=$(uname -r) +BASE_PATH=/lib/modules/${KERNEL_VER}/extra + +insmod $BASE_PATH/vc8000.ko diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/rmmod.sh b/drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/rmmod.sh new file mode 100755 index 00000000000000..7b55b5b0a08c9e --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/rmmod.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +rmmod vc8000 diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Android.mk b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Android.mk new file mode 100644 index 00000000000000..448a9b78be2f74 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Android.mk @@ -0,0 +1,28 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +VPU_OUT := $(TARGET_OUT_INTERMEDIATES)/VPU_OBJ +VC8000_KO = $(VPU_OUT)/ko/vc8000.ko +VC8000_DIR := $(LOCAL_PATH) + +$(VC8000_KO): + $(MAKE_TOOL) -C $(VC8000_DIR) KDIR=$(KERNEL_DIR) CROSS_COMPILE=$(CROSS_COMPILE) ARCH=$(ARCH); \ + cp $(VC8000_DIR)/vc8000.ko $(VC8000_KO) + +LOCAL_PREBUILT_MODULE_FILE := \ + $(VC8000_KO) + +LOCAL_GENERATED_SOURCES += \ + $(VC8000_KO) + +LOCAL_MODULE_RELATIVE_PATH := modules + +LOCAL_MODULE := vc8000 +LOCAL_MODULE_SUFFIX := .ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_CLASS := SHARED_LIBRARIES +LOCAL_VENDOR_MODULE := true +LOCAL_STRIP_MODULE := false + +include $(BUILD_PREBUILT) diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Makefile 
b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Makefile new file mode 100755 index 00000000000000..f9c8b2248af9e8 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Makefile @@ -0,0 +1,211 @@ +############################################################################# +# +# The MIT License (MIT) +# +# Copyright (c) 2014 - 2021 VERISILICON +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +############################################################################# +# +# The GPL License (GPL) +# +# Copyright (C) 2014 - 2021 VERISILICON +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +############################################################################# +# +# Note: This software is released under dual MIT and GPL licenses. A +# recipient may use this file under the terms of either the MIT license or +# GPL License. If you wish to use only one license not the other, you can +# indicate your decision by deleting one of the above license notices in your +# version of this file. +# +############################################################################## +ARM_CROSS_COMPILE ?= n + +ifeq ($(ARM_CROSS_COMPILE),y) +export ARCH=arm64 +export CROSS_COMPILE=/opt/kmb/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu/bin/aarch64-linux-gnu- +KDIR := /home/vsi/kmb-evm/kernel/mainline-tracking +endif + +SUPPORT_MMU = y +SUPPORT_AXIFE = n +SUPPORT_VCMD_ENABLE_IP = n + +ifeq ($(obj),) +obj = . +endif + +################################################# +# configuration + +MDIR := hantro + +# drivers objects +# list-multi := hantro_mmu.o + +# # what to build + +vc8000-objs := vc8000_driver.o vc8000_vcmd_driver.o bidirect_list.o vcmdswhwregisters.o vc8000_normal_driver.o +obj-m += vc8000.o + +ifeq ($(strip $(SUPPORT_MMU)),y) +vc8000-objs += hantro_mmu.o +endif + +ifeq ($(strip $(SUPPORT_AXIFE)),y) +vc8000-objs += vc8000_axife.o +endif + +tardest := . 
+ +################################################# +# compile modules + +ifneq ($(KERNELRELEASE),) +# recursive call from kernel build system +dummy := $(shell echo $(KERNELRELEASE) > $(obj)/.version) + +ifeq ($(VERSION).$(PATCHLEVEL),2.6) + export-objs := + list-multi := +else + multi-m := $(filter $(list-multi), $(obj-m)) + int-m := $(sort $(foreach m, $(multi-m), $($(basename $(m))-objs))) + export-objs := $(filter $(int-m) $(obj-m),$(export-objs)) +endif + +CC += -I$(obj) +EXTRA_CFLAGS += -g + +#ifeq ($(CONFIG_ANDROID), y) +#ccflags-y += -DTRACE_INCLUDE_PATH=$(src) +#else +#ccflags-y += -DTRACE_INCLUDE_PATH=$(PWD) +#endif + +# Print debugging messages from the device +#EXTRA_CFLAGS += -DHANTRO_DRIVER_DEBUG + +#EXTRA_CFLAGS += -DHANTROMMU_DEBUG +#EXTRA_CFLAGS += -DDYNAMIC_MALLOC_VCMDNODE + +snapshot := $(wildcard $(obj)/.snapshot) +ifneq ($(snapshot),) +SNAPSHOT_CFLAGS := -DSNAPSHOT='$(shell cat $(snapshot))' +EXTRA_CFLAGS += $(SNAPSHOT_CFLAGS) +endif + +ifeq ($(strip $(SUPPORT_MMU)),y) +EXTRA_CFLAGS += -DHANTROMMU_SUPPORT +endif + +ifeq ($(strip $(SUPPORT_AXIFE)),y) +EXTRA_CFLAGS += -DHANTROAXIFE_SUPPORT +endif + +ifeq ($(strip $(SUPPORT_VCMD_ENABLE_IP)),y) +EXTRA_CFLAGS += -DHANTROVCMD_ENABLE_IP_SUPPORT +endif + +-include $(TOPDIR)/Rules.make +else +# take version info from last module build if available +KERNELRELEASE := $(shell cat $(obj)/.version 2>/dev/null || uname -r) +endif +ifneq ($(ARM_CROSS_COMPILE),y) +KDIR_BASE := /afs/hantro.com/projects/Testing/Board_Version_Control + +#KDIR := $(KDIR_BASE)/Realview_EB/SW/Linux/v0_0/linux-2.6.19-arm2 +#KDIR := $(KDIR_BASE)/Realview_EB/SW/Linux/linux-2.6.21-arm1/v0_0/linux-2.6.21-arm1 +#KDIR := $(KDIR_BASE)/Realview_PB/PB926EJS/SW/Linux/linux-2.6.24-arm2-spnlck/v0_1/linux-2.6.24-arm2-spnlck +#KDIR := $(KDIR_BASE)/Realview_PB/PB926EJS/SW/Linux/linux-2.6.28-arm1/v0_1/linux-2.6.28-arm1 +#KDIR := $(KDIR_BASE)/SW_Common/ARM_realview_v6/2.6.28-arm1/v0_1-v6/linux-2.6.28-arm1 + +KVER := $(shell uname -r) +KDIR := 
/lib/modules/$(KVER)/build +endif + +PWD := $(shell pwd) +DEST := /lib/modules/$(KERNELRELEASE)/$(MDIR) + +# which files to install? +inst-m := $(wildcard *.ko) +ifeq ($(inst-m),) + inst-m := $(obj-m) +endif + + +# locales seem to cause trouble sometimes. +LC_ALL = POSIX +export LC_ALL + +default:: + $(MAKE) -C $(KDIR) M=$(PWD) modules + +install:: + strip --strip-debug $(inst-m) + -su -c "mkdir -p $(DEST); cp -v $(inst-m) $(DEST); depmod -a" + +clean:: + $(MAKE) -C $(KDIR) M=$(PWD) clean + -rm -f .version + +################################################# +# build tarballs + +thisdir := $(notdir $(PWD)) +name := $(shell echo $(thisdir) | sed 's/-.*//') +ver := $(shell echo $(thisdir) | sed 's/.*-//') +date := $(shell date +%Y%m%d) +tardest ?= . + +snapdir := $(HOME)/snapshot +snap ?= $(name) + +release: clean + rm -f .snapshot + (cd ..; tar cvzf $(tardest)/$(name)-$(ver).tar.gz $(thisdir)) + +snapshot snap tarball: clean + echo $(date) > .snapshot + (cd ..; tar czf $(snapdir)/$(snap)-$(date).tar.gz $(thisdir)) + $(MAKE) -C $(snapdir) + +################################################# +# other stuff + +%.asm: %.o + objdump -S $< > $@ + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/README b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/README new file mode 100755 index 00000000000000..23378984f02c2c --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/README @@ -0,0 +1,42 @@ + +-- BUILD -- + +You need a fully configured kernel source tree in order to build the +driver. Please set the location of the kernel tree in the Makefile (KDIR). +If you want some extra debug information in the kernel logs, you could +define the HANTRO_DRIVER_DEBUG but please be aware that allot of things are traced +with this option. +Also you could set a particular device MAJOR in the 'vc8000_normal_driver.c' and 'vc8000_vcmd_driver.c' +if you don't want dynamic allocation. 
+ +Just run in this dir: + +%make + +If you want to install the modules please check first the install destination +in the Makefile (MDIR, DEST) and run: + +%make install + +-- USAGE -- + +Run script driver_load.sh to do all the things described below. +> sh driver_load.sh vcmd=0 + +First of all the module has to be inserted into the kernel with: +(you need a Linux shell cmd line) + +%insmod vc8000.ko vcmd_supported=1 + +Second of all a char device file has to be created: + +%mknod /dev/vc8000 c $MAJOR 0 + +Replace MAJOR = 254 with the correct value (i.e. read /proc/devices to find out +the exact value). + +Make sure that you have RW rights for the newly created dev file (use 'chmod'). + +The 'driver_load' script is provided for preparing all the things necessary for +the driver to be usable. The script is using 'cat' to retrieve the device's +major from /proc/devices. Remember to set the driver parameters. diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.c new file mode 100644 index 00000000000000..dc38ac25ab392c --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.c @@ -0,0 +1,222 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ +#ifdef __FREERTOS__ +#include +#include "osal.h" +#elif defined(__linux__) +#include +#include +/* needed for __init,__exit directives */ +#include +/* needed for remap_page_range + SetPageReserved + ClearPageReserved +*/ +#include +/* obviously, for kmalloc */ +#include +/* for struct file_operations, register_chrdev() */ +#include +/* standard error codes */ +#include + +#include +/* request_irq(), free_irq() */ +#include +#include + +#include +#include +/* needed for virt_to_phys() */ +#include +#include +#include +#include + +#include + +#include +#include +#include +#else //For other os +//TODO... +#endif +#include "bidirect_list.h" + +void init_bi_list(bi_list* list) +{ + list->head = NULL; + list->tail = NULL; +} + +bi_list_node* bi_list_create_node(void) +{ + bi_list_node* node=NULL; + node=(bi_list_node*)vmalloc(sizeof(bi_list_node)); + if(node==NULL) + { + PDEBUG ("%s\n","vmalloc for node fail!"); + return node; + } + memset(node,0,sizeof(bi_list_node)); + return node; +} +void bi_list_free_node(bi_list_node* node) +{ + //free current node + vfree(node); + return; +} + +void bi_list_insert_node_tail(bi_list* list,bi_list_node* current_node) +{ + if(current_node==NULL) + { + PDEBUG ("%s\n","insert node tail NULL"); + return; + } + if(list->tail) + { + current_node->previous=list->tail; + list->tail->next=current_node; + list->tail=current_node; + list->tail->next=NULL; + } + else + { + list->head=current_node; + list->tail=current_node; + current_node->next=NULL; + current_node->previous=NULL; + } + return; +} + +void bi_list_insert_node_before(bi_list* list,bi_list_node* base_node,bi_list_node* new_node) +{ + bi_list_node* temp_node_previous=NULL; + if(new_node==NULL) + { + PDEBUG ("%s\n","insert node before new node NULL"); + return; + } + if(base_node) + { + if(base_node->previous) + { + //at middle position + temp_node_previous = base_node->previous; + 
temp_node_previous->next=new_node; + new_node->next = base_node; + base_node->previous = new_node; + new_node->previous=temp_node_previous; + } + else + { + //at head + base_node->previous = new_node; + new_node->next = base_node; + list->head=new_node; + new_node->previous = NULL; + } + } + else + { + //at tail + bi_list_insert_node_tail(list,new_node); + } + return; +} + + +void bi_list_remove_node(bi_list* list,bi_list_node* current_node) +{ + bi_list_node* temp_node_previous=NULL; + bi_list_node* temp_node_next=NULL; + if(current_node==NULL) + { + PDEBUG ("%s\n","remove node NULL"); + return; + } + temp_node_next=current_node->next; + temp_node_previous=current_node->previous; + + if(temp_node_next==NULL && temp_node_previous==NULL ) + { + //there is only one node. + list->head=NULL; + list->tail=NULL; + } + else if(temp_node_next==NULL) + { + //at tail + list->tail=temp_node_previous; + temp_node_previous->next=NULL; + } + else if( temp_node_previous==NULL) + { + //at head + list->head=temp_node_next; + temp_node_next->previous=NULL; + } + else + { + //at middle position + temp_node_previous->next=temp_node_next; + temp_node_next->previous=temp_node_previous; + } + return; +} + + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.h new file mode 100644 index 00000000000000..002cc99aa09377 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.h @@ -0,0 +1,116 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, 
+* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. 
If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _BIDIRECT_LIST_H_ +#define _BIDIRECT_LIST_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __FREERTOS__ +#include "dev_common_freertos.h" /* needed for the _IOW etc stuff used later */ +#elif defined(__linux__) +#include /* needed for the _IOW etc stuff used later */ +#else //For other os +//TODO... +#endif + +/* + * Macros to help debugging + */ + +#undef PDEBUG /* undef it, just in case */ +#ifdef BIDIRECTION_LIST_DEBUG +# ifdef __KERNEL__ + /* This one if debugging is on, and kernel space */ +# define PDEBUG(fmt, args...) printk( KERN_INFO "hmp4e: " fmt, ## args) +# else + /* This one for user space */ +# define PDEBUG(fmt, args...) printf(__FILE__ ":%d: " fmt, __LINE__ , ## args) +# endif +#else +# define PDEBUG(fmt, args...) 
/* not debugging: nothing */ +#endif + +/***********************************************************************************************************************************************\ +* +\**********************************************************************************************************************************************/ +typedef struct bi_list_node{ + void* data; + struct bi_list_node* next; + struct bi_list_node* previous; +}bi_list_node; +typedef struct bi_list{ + bi_list_node* head; + bi_list_node* tail; +}bi_list; + +void init_bi_list(bi_list* list); + +bi_list_node* bi_list_create_node(void); + +void bi_list_free_node(bi_list_node* node); + +void bi_list_insert_node_tail(bi_list* list,bi_list_node* current_node); + +void bi_list_insert_node_before(bi_list* list,bi_list_node* base_node,bi_list_node* new_node); + +void bi_list_remove_node(bi_list* list,bi_list_node* current_node); + +#ifdef __cplusplus +} +#endif + +#endif /* !_BIDIRECT_LIST_H_ */ diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/driver_load.sh b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/driver_load.sh new file mode 100755 index 00000000000000..438243f7b5c6fc --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/driver_load.sh @@ -0,0 +1,61 @@ +#!/bin/sh +#dmesg -C +module="vc8000" +device="/dev/vc8000" +mode="666" +#Used to setup default parameters +DefaultParameter(){ + vcmd=1 + #default value can be added to here +} +echo + +if [ ! 
-e /dev ] +then + mkdir /dev/ +fi +echo "Help information:" +echo "Input format should be like as below" +echo "./driver_load.sh vcmd=0(default) or (1)" +if [ $# -eq 0 ] +then + DefaultParameter + echo " Default vcmd_supported value = $vcmd" +else + para_1="$1" + vcmd_input=${para_1##*=} + vcmd=$vcmd_input + if [ $vcmd -ne 0 ] && [ $vcmd -ne 1 ] + then + echo "Invalid vcmd_supported value, which = $vcmd" + echo "vcmd_supported should be 0 or 1" + fi + echo "vcmd_supported = $vcmd" +fi +#vcmd_supported = 0(default) or 1 +#insert module +insmod $module.ko vcmd_supported=$vcmd || exit 1 +#insmod $module.ko vcmd_supported=1 || exit 1 + +echo "module $module inserted" + +#remove old nod +rm -f $device + +#read the major asigned at loading time +major=`cat /proc/devices | grep $module | cut -c1-3` + +echo "$module major = $major" + +#create dev node +mknod $device c $major 0 + +echo "node $device created" + +#give all 'rw' access +chmod $mode $device + +echo "set node access to $mode" + +#the end +echo diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantro_mmu.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantro_mmu.c new file mode 100755 index 00000000000000..6c8dd57988aed2 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantro_mmu.c @@ -0,0 +1,1911 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission 
notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ +#ifdef __FREERTOS__ +#include "base_type.h" +#include "dev_common_freertos.h" +#include "io_tools.h" +#elif defined(__linux__) +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/moduleparam.h> +#include <linux/version.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0)) +#include <linux/dma-map-ops.h> +#else +#include <linux/dma-contiguous.h> +#endif +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/sched.h> +#include <linux/io.h> +#endif +#include "vc8000_devfreq.h" +#include "hantrommu.h" + +MODULE_DESCRIPTION("Verisilicon VPU Driver"); +MODULE_LICENSE("GPL"); + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +/******************************************************************************* +***** New MMU Defination *******************************************************/ +#define MMU_MTLB_SHIFT 22 +#define MMU_STLB_4K_SHIFT 12 +#define MMU_STLB_64K_SHIFT 16 + +#define MMU_MTLB_BITS (32 - MMU_MTLB_SHIFT) +#define MMU_PAGE_4K_BITS MMU_STLB_4K_SHIFT +#define MMU_STLB_4K_BITS (32 - MMU_MTLB_BITS - MMU_PAGE_4K_BITS) +#define MMU_PAGE_64K_BITS MMU_STLB_64K_SHIFT +#define MMU_STLB_64K_BITS (32 - MMU_MTLB_BITS - MMU_PAGE_64K_BITS) + +#define MMU_MTLB_ENTRY_NUM (1 << MMU_MTLB_BITS) +#define MMU_MTLB_SIZE (MMU_MTLB_ENTRY_NUM << 2) +#define MMU_STLB_4K_ENTRY_NUM (1 << MMU_STLB_4K_BITS) +#define MMU_STLB_4K_SIZE (MMU_STLB_4K_ENTRY_NUM << 2) +#define MMU_PAGE_4K_SIZE (1 << MMU_STLB_4K_SHIFT) +#define MMU_STLB_64K_ENTRY_NUM (1 << MMU_STLB_64K_BITS) +#define MMU_STLB_64K_SIZE (MMU_STLB_64K_ENTRY_NUM << 2) +#define MMU_PAGE_64K_SIZE (1 << MMU_STLB_64K_SHIFT) + +#define MMU_MTLB_MASK (~((1U << MMU_MTLB_SHIFT)-1)) +#define MMU_STLB_4K_MASK ((~0U << MMU_STLB_4K_SHIFT) ^ MMU_MTLB_MASK) +#define MMU_PAGE_4K_MASK (MMU_PAGE_4K_SIZE - 1) +#define MMU_STLB_64K_MASK ((~((1U << MMU_STLB_64K_SHIFT)-1)) ^ MMU_MTLB_MASK) +#define MMU_PAGE_64K_MASK (MMU_PAGE_64K_SIZE - 1) + +/* Page offset definitions.
*/ +#define MMU_OFFSET_4K_BITS (32 - MMU_MTLB_BITS - MMU_STLB_4K_BITS) +#define MMU_OFFSET_4K_MASK ((1U << MMU_OFFSET_4K_BITS) - 1) +#define MMU_OFFSET_16K_BITS (32 - MMU_MTLB_BITS - MMU_STLB_16K_BITS) +#define MMU_OFFSET_16K_MASK ((1U << MMU_OFFSET_16K_BITS) - 1) + +#define MMU_MTLB_ENTRY_HINTS_BITS 6 +#define MMU_MTLB_ENTRY_STLB_MASK (~((1U << MMU_MTLB_ENTRY_HINTS_BITS) - 1)) + +#define MMU_MTLB_PRESENT 0x00000001 +#define MMU_MTLB_EXCEPTION 0x00000002 +#define MMU_MTLB_4K_PAGE 0x00000000 + +#define MMU_STLB_PRESENT 0x00000001 +#define MMU_STLB_EXCEPTION 0x00000002 +#define MMU_STLB_4K_PAGE 0x00000000 + +#define MMU_FALSE 0 +#define MMU_TRUE 1 + +#define MMU_ERR_OS_FAIL (0xffff) +#define MMU_EFAULT MMU_ERR_OS_FAIL +#define MMU_ENOTTY MMU_ERR_OS_FAIL + +#define MMU_INFINITE ((u32) ~0U) + +#define MAX_NOPAGED_SIZE 0x20000 +#define MMU_SUPPRESS_OOM_MESSAGE 1 + +#if MMU_SUPPRESS_OOM_MESSAGE +#define MMU_NOWARN __GFP_NOWARN +#else +#define MMU_NOWARN 0 +#endif + +#define MMU_IS_ERROR(status) (status < 0) +#define MMU_NO_ERROR(status) (status >= 0) +#define MMU_IS_SUCCESS(status) (status == MMU_STATUS_OK) + +#undef MMUDEBUG +#ifdef HANTROMMU_DEBUG +# ifdef __KERNEL__ +# define MMUDEBUG(fmt, args...) printk( KERN_INFO "hantrommu: " fmt, ## args) +# else +# define MMUDEBUG(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#else +# define MMUDEBUG(fmt, args...) pr_debug("hantrommu: " fmt, ## args) +#endif + +#define MMU_ON_ERROR(func) \ + do { \ + status = func; \ + if (MMU_IS_ERROR(status)){ \ + goto onerror; \ + } \ + }while (MMU_FALSE) + +#define WritePageEntry(page_entry, entry_value) \ + *(unsigned int *)(page_entry) =(unsigned int)(entry_value) + +#define ReadPageEntry(page_entry) *(unsigned int *)(page_entry) + +#define DRIVER_NAME "hantroencdma" + +/* simple map mode: generate mmu address which is same as input bus address*/ +unsigned int simple_map = 0; +/* this shift should be an integral multiple of mmu page size(4096). 
+ It can generate a mmu address shift in simple map mode*/ +unsigned int map_shift = 0; + +/* module_param(name, type, perm) */ +module_param(simple_map, uint, 0); +module_param(map_shift, uint, 0); + +enum MMURegion { + MMU_REGION_IN, + MMU_REGION_OUT, + MMU_REGION_PRIVATE, + MMU_REGION_PUB, + + MMU_REGION_COUNT +}; + +struct MMUNode { + void *buf_virtual_address; + unsigned int buf_bus_address; /* used in kernel map mode */ + int mtlb_start; + int stlb_start; + int mtlb_end; + int stlb_end; + unsigned int page_count; + int process_id; + struct file* filp; + + struct MMUNode *next; + struct MMUNode *prev; +}; + +struct MMUDDRRegion { + unsigned long long physical_address; + unsigned long long virtual_address; + unsigned int page_count; + + void *node_mutex; + struct MMUNode *simple_map_head; + struct MMUNode *simple_map_tail; + struct MMUNode *free_map_head; + struct MMUNode *map_head; + struct MMUNode *free_map_tail; + struct MMUNode *map_tail; +}; + +struct MMU { + void *page_table_mutex; + /* Master TLB information. 
*/ + unsigned int mtlb_size; + unsigned long long mtlb_physical; + void *mtlb_virtual; + unsigned int mtlb_entries; + + int enabled; + unsigned int stlb_size; + unsigned long long stlb_physical; + void *stlb_virtual; + struct MMUDDRRegion region[MMU_REGION_COUNT]; + unsigned int page_table_array_size; + unsigned long long page_table_array_physical; + void *page_table_array; +}; + +static struct MMU *g_mmu = NULL; +extern unsigned long gBaseDDRHw; +unsigned int mmu_enable = MMU_FALSE; +static unsigned int mmu_init = MMU_FALSE; +extern unsigned int pcie; +static unsigned int region_in_mmu_start = REGION_IN_MMU_START; +static unsigned int region_in_mmu_end = REGION_IN_MMU_END; +static unsigned int region_out_mmu_start = REGION_OUT_MMU_START; +static unsigned int region_out_mmu_end = REGION_OUT_MMU_END; +static unsigned int region_private_mmu_start = REGION_PRIVATE_MMU_START; +static unsigned int region_private_mmu_end = REGION_PRIVATE_MMU_END; + +static const struct platform_device_info hantro_platform_info = { + .name = DRIVER_NAME, + .id = -1, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int hantro_drm_probe(struct platform_device *pdev) +{ + int result; + struct device *dev = &pdev->dev; + (void) dev; + (void) result; + return 0; +} +static int hantro_drm_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + (void) dev; + return 0; +} +static const struct platform_device_id hantro_drm_platform_ids[] = { + { + .name = DRIVER_NAME, + }, + {/* sentinel */ }, +}; +static const struct of_device_id hantro_of_match[] = { + { .compatible = "thead,light-vc8000e-mmu", }, + {/* sentinel */} +}; +static struct platform_driver hantro_drm_platform_driver = { + .probe = hantro_drm_probe, + .remove = hantro_drm_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = hantro_of_match, + }, + .id_table = hantro_drm_platform_ids, +}; + +struct platform_device *platformdev; + +static enum MMUStatus ZeroMemory(void *memory, 
unsigned int bytes) { + memset(memory, 0, bytes); + + return MMU_STATUS_OK; +} + +static enum MMUStatus AllocateMemory(unsigned int bytes, void **memory){ + void *pointer; + enum MMUStatus status; + + if (bytes > MAX_NOPAGED_SIZE) { + pointer = (void*) vmalloc(bytes); + MMUDEBUG(" *****VMALLOC size*****%d\n", bytes); + } else { + pointer = (void*) kmalloc(bytes, GFP_KERNEL | MMU_NOWARN); + MMUDEBUG(" *****KMALLOC size*****%d\n", bytes); + } + + if (pointer == NULL) { + /* Out of memory. */ + status = MMU_STATUS_OUT_OF_MEMORY; + goto onerror; + } + + /* Return pointer to the memory allocation. */ + *memory = pointer; + + return MMU_STATUS_OK; + +onerror: + /* Return the status. */ + return status; +} + +static enum MMUStatus FreeMemory(void *memory) { + /* Free the memory from the OS pool. */ + if (is_vmalloc_addr(memory)) { + MMUDEBUG(" *****VFREE*****%p\n", memory); + vfree(memory); + } else { + MMUDEBUG(" *****KFREE*****%p\n", memory); + kfree(memory); + } + return MMU_STATUS_OK; +} + + +static enum MMUStatus SMDeleteNode(struct MMUNode **pp) { + (*pp)->prev->next = (*pp)->next; + (*pp)->next->prev = (*pp)->prev; + + MMUDEBUG(" *****DeleteNode size*****%d\n", (*pp)->page_count); + FreeMemory(*pp); + + return MMU_STATUS_OK; +} + +static enum MMUStatus DeleteNode(struct MMUNode **pp) { + (*pp)->prev->next = (*pp)->next; + (*pp)->next->prev = (*pp)->prev; + + MMUDEBUG(" *****DeleteNode size*****%d\n", (*pp)->page_count); + FreeMemory(*pp); + + return MMU_STATUS_OK; +} + +static enum MMUStatus MergeNode(struct MMUNode *h, + struct MMUNode **pp) { + struct MMUNode *tmp0 = h->next; + struct MMUNode *tmp1 = h->next; + while(tmp0) { + /* 1th step: find front contiguous memory node */ + if(tmp0->mtlb_end == (*pp)->mtlb_start && + tmp0->stlb_end == (*pp)->stlb_start) { + tmp0->mtlb_end = (*pp)->mtlb_end; + tmp0->stlb_end = (*pp)->stlb_end; + tmp0->page_count += (*pp)->page_count; + DeleteNode(pp); + MMUDEBUG(" *****first merge to front. 
node size*****%d\n", tmp0->page_count); + /* after merge to front contiguous memory node, + find if there is behind contiguous memory node */ + while(tmp1) { + /* merge */ + if(tmp1->mtlb_start == tmp0->mtlb_end && + tmp1->stlb_start == tmp0->stlb_end) { + tmp1->mtlb_start = tmp0->mtlb_start; + tmp1->stlb_start = tmp0->stlb_start; + tmp1->page_count += tmp0->page_count; + MMUDEBUG(" *****second merge to behind. node size*****%d\n", tmp1->page_count); + DeleteNode(&tmp0); + return MMU_STATUS_OK; + } + tmp1 = tmp1->next; + } + return MMU_STATUS_OK; + /* 1th step: find behind contiguous memory node */ + } else if(tmp0->mtlb_start == (*pp)->mtlb_end && + tmp0->stlb_start == (*pp)->stlb_end) { + tmp0->mtlb_start = (*pp)->mtlb_start; + tmp0->stlb_start = (*pp)->stlb_start; + tmp0->page_count += (*pp)->page_count; + DeleteNode(pp); + MMUDEBUG(" *****first merge to behind. node size*****%d\n", tmp0->page_count); + /* after merge to behind contiguous memory node, + find if there is front contiguous memory node */ + while(tmp1) { + /* merge */ + if(tmp1->mtlb_end == tmp0->mtlb_start && + tmp1->stlb_end == tmp0->stlb_start) { + tmp1->mtlb_end = tmp0->mtlb_end; + tmp1->stlb_end = tmp0->stlb_end; + tmp1->page_count += tmp0->page_count; + MMUDEBUG(" *****second merge to front. 
node size*****%d\n", tmp1->page_count); + DeleteNode(&tmp0); + return MMU_STATUS_OK; + } + tmp1 = tmp1->next; + } + return MMU_STATUS_OK; + } + tmp0 = tmp0->next; + } + return MMU_STATUS_FALSE; +} + +/* Insert a node to map list */ +static enum MMUStatus SMInsertNode(enum MMURegion e, + struct MMUNode **pp) { + struct MMUNode *h; + + h = g_mmu->region[e].simple_map_head; + + h->next->prev = *pp; + (*pp)->next = h->next; + (*pp)->prev = h; + h->next = *pp; + MMUDEBUG(" *****insert bm node*****%d\n", (*pp)->page_count); + + return MMU_STATUS_OK; +} + +static enum MMUStatus InsertNode(enum MMURegion e, + struct MMUNode **pp, + unsigned int free) { + enum MMUStatus status; + struct MMUNode *h, *b; + + if(free) { + h = g_mmu->region[e].free_map_head; + b = g_mmu->region[e].map_head; + status = MergeNode(h, pp); + MMUDEBUG(" *****insert free*****%d\n", (*pp)->page_count); + if(MMU_IS_ERROR(status)) { + /* remove from map*/ + if((*pp)->prev != NULL && (*pp)->next != NULL) { + (*pp)->prev->next = (*pp)->next; + (*pp)->next->prev = (*pp)->prev; + } + /* insert to free map */ + h->next->prev = *pp; + (*pp)->next = h->next; + (*pp)->prev = h; + h->next = *pp; + } + } else { + h = g_mmu->region[e].map_head; + + h->next->prev = *pp; + (*pp)->next = h->next; + (*pp)->prev = h; + h->next = *pp; + MMUDEBUG(" *****insert unfree*****%d\n", (*pp)->page_count); + } + + return MMU_STATUS_OK; +} + +/* Create a Node */ +static enum MMUStatus SMCreateNode(enum MMURegion e, + struct MMUNode **node, + unsigned int page_count) { + struct MMUNode *p, **new; + + p = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + new = &p; + + (*new)->mtlb_start = -1; + (*new)->stlb_start = -1; + (*new)->mtlb_end = -1; + (*new)->stlb_end = -1; + (*new)->process_id = 0; + (*new)->filp = NULL; + (*new)->page_count = 0; + (*new)->prev = NULL; + (*new)->next = NULL; + /* Insert a uncomplete Node, it will be initialized later */ + SMInsertNode(e, new); + + /* return a new node for map buffer */ + *node 
= *new; + return MMU_STATUS_OK; +} + +/* Create initial Nodes */ +static enum MMUStatus SMCreateNodes(void) { + struct MMUNode *simple_map_head; + struct MMUNode *simple_map_tail; + int i; + + /* Init each region map node */ + for (i = 0; i < MMU_REGION_COUNT ; i++) { + simple_map_head = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + simple_map_tail = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + + simple_map_head->mtlb_start = -1; + simple_map_head->stlb_start = -1; + simple_map_head->mtlb_end = -1; + simple_map_head->stlb_end = -1; + simple_map_head->process_id = 0; + simple_map_head->filp = NULL; + simple_map_head->page_count = 0; + simple_map_head->prev = NULL; + simple_map_head->next = simple_map_tail; + + simple_map_tail->mtlb_start = -1; + simple_map_tail->stlb_start = -1; + simple_map_tail->mtlb_end = -1; + simple_map_tail->stlb_end = -1; + simple_map_tail->process_id = 0; + simple_map_tail->filp = NULL; + simple_map_tail->page_count = 0; + simple_map_tail->prev = simple_map_head; + simple_map_tail->next = NULL; + + g_mmu->region[i].simple_map_head = simple_map_head; + g_mmu->region[i].simple_map_tail = simple_map_tail; + } + return MMU_STATUS_OK; +} + +static enum MMUStatus CreateNode(void) { + struct MMUNode *free_map_head, *map_head, *p, **pp; + struct MMUNode *free_map_tail, *map_tail; + int i; + unsigned int page_count; + unsigned int prev_stlb = 0, prev_mtlb = 0; + + /* Init each region map node */ + for (i = 0; i < MMU_REGION_COUNT ; i++) { + free_map_head = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + map_head = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + free_map_tail = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + map_tail = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + + free_map_head->mtlb_start = map_head->mtlb_start = -1; + free_map_head->stlb_start = map_head->stlb_start = -1; + free_map_head->mtlb_end = map_head->mtlb_end = -1; + free_map_head->stlb_end = 
map_head->stlb_end = -1; + free_map_head->process_id = map_head->process_id = 0; + free_map_head->filp = map_head->filp = NULL; + free_map_head->page_count = map_head->page_count = 0; + free_map_head->prev = map_head->prev = NULL; + free_map_head->next = free_map_tail; + map_head->next = map_tail; + + free_map_tail->mtlb_start = map_tail->mtlb_start = -1; + free_map_tail->stlb_start = map_tail->stlb_start = -1; + free_map_tail->mtlb_end = map_tail->mtlb_end = -1; + free_map_tail->stlb_end = map_tail->stlb_end = -1; + free_map_tail->process_id = map_tail->process_id = 0; + free_map_tail->filp = map_tail->filp = NULL; + free_map_tail->page_count = map_tail->page_count = 0; + free_map_tail->prev = free_map_head; + map_tail->prev = map_head; + free_map_tail->next = map_tail->next = NULL; + + g_mmu->region[i].free_map_head = free_map_head; + g_mmu->region[i].map_head = map_head; + g_mmu->region[i].free_map_tail = free_map_tail; + g_mmu->region[i].map_tail = map_tail; + + p = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + pp = &p; + + switch(i) { + case MMU_REGION_IN: + page_count = (REGION_IN_END - REGION_IN_START + 1)/PAGE_SIZE; + p->stlb_start = region_in_mmu_start >> 12 & 0x3FF; //hold mmu addr: 0x0 + p->mtlb_start = region_in_mmu_start >> 22; + //end point next region start: +1; for remainder: +1 + p->stlb_end = prev_stlb = region_in_mmu_end >> 12 & 0x3FF; + p->mtlb_end = prev_mtlb = region_in_mmu_end >> 22; + p->page_count = page_count - 1; //hold mmu addr: 0x0 + break; + case MMU_REGION_OUT: + page_count = (REGION_OUT_END - REGION_OUT_START + 1)/PAGE_SIZE; + p->stlb_start = region_out_mmu_start >> 12 & 0x3FF; + p->mtlb_start = region_out_mmu_start >> 22; + p->stlb_end = prev_stlb = region_out_mmu_end >> 12 & 0x3FF; + p->mtlb_end = prev_mtlb = region_out_mmu_end >> 22; + p->page_count = page_count; + break; + case MMU_REGION_PRIVATE: + page_count = (REGION_PRIVATE_END - REGION_PRIVATE_START + 1)/PAGE_SIZE; + p->stlb_start = region_private_mmu_start >> 
12 & 0x3FF; + p->mtlb_start = region_private_mmu_start >> 22; + p->stlb_end = prev_stlb = region_private_mmu_end >> 12 & 0x3FF; + p->mtlb_end = prev_mtlb = region_private_mmu_end >> 22; + p->page_count = page_count; + break; + case MMU_REGION_PUB: + p->stlb_start = prev_stlb; + p->mtlb_start = prev_mtlb; + p->stlb_end = prev_stlb = MMU_STLB_4K_ENTRY_NUM - 1; + p->mtlb_end = prev_mtlb = MMU_MTLB_ENTRY_NUM - 1; + p->page_count = (p->mtlb_end - p->mtlb_start) * MMU_STLB_4K_ENTRY_NUM + + p->stlb_end - p->stlb_start + 1; + break; + default: + pr_notice(" *****MMU Region Error*****\n"); + break; + } + + p->process_id = 0; + p->filp = NULL; + p->next = p->prev = NULL; + + InsertNode(i, pp, 1); + } + + return MMU_STATUS_OK; +} + +/* A simpile function to check if the map buffer is existed. + it needs more complex version*/ +static enum MMUStatus SMCheckAddress(enum MMURegion e, + void *virtual_address) { + struct MMUNode *p; + p = g_mmu->region[e].simple_map_head->next; + + while(p) { + if(p->buf_virtual_address == virtual_address) { + return MMU_STATUS_FALSE; + } + p = p->next; + } + return MMU_STATUS_OK; +} + +static enum MMUStatus FindFreeNode(enum MMURegion e, + struct MMUNode **node, + unsigned int page_count) { + struct MMUNode *p; + p = g_mmu->region[e].free_map_head->next; + + while(p) { + if(p->page_count >= page_count) { + *node = p; + return MMU_STATUS_OK; + } + p = p->next; + } + return MMU_STATUS_FALSE; +} + +static enum MMUStatus SplitFreeNode(enum MMURegion e, + struct MMUNode **node, + unsigned int page_count) { + struct MMUNode *p, **new; + + p = kmalloc(sizeof(struct MMUNode), GFP_KERNEL | MMU_NOWARN); + new = &p; + + **new = **node; + + (*new)->mtlb_start = (*node)->mtlb_start; + (*new)->stlb_start = (*node)->stlb_start; + (*new)->mtlb_end = (page_count + (*node)->stlb_start) / + MMU_STLB_4K_ENTRY_NUM + + (*node)->mtlb_start; + (*new)->stlb_end = (page_count + (*node)->stlb_start) % + MMU_STLB_4K_ENTRY_NUM; + (*new)->process_id = (*node)->process_id; + 
(*new)->page_count = page_count; + + MMUDEBUG(" *****new mtlb_start*****%d\n", (*new)->mtlb_start); + MMUDEBUG(" *****new stlb_start*****%d\n", (*new)->stlb_start); + MMUDEBUG(" *****new mtlb_end*****%d\n", (*new)->mtlb_end); + MMUDEBUG(" *****new stlb_end*****%d\n", (*new)->stlb_end); + /* Insert a new node in map */ + InsertNode(e, new, 0); + + /* Update free node in free map*/ + (*node)->page_count -= page_count; + if((*node)->page_count == 0) { + DeleteNode(node); + MMUDEBUG(" *****old node deleted*****\n"); + } else { + (*node)->mtlb_start = (*new)->mtlb_end; + (*node)->stlb_start = (*new)->stlb_end; + + MMUDEBUG(" *****old mtlb_start*****%d\n", (*node)->mtlb_start); + MMUDEBUG(" *****old stlb_start*****%d\n", (*node)->stlb_start); + MMUDEBUG(" *****old mtlb_end*****%d\n", (*node)->mtlb_end); + MMUDEBUG(" *****old stlb_end*****%d\n", (*node)->stlb_end); + } + /* return a new node for map buffer */ + *node = *new; + + return MMU_STATUS_OK; +} + +static enum MMUStatus SMRemoveNode(enum MMURegion e, + void *buf_virtual_address, + unsigned int process_id) { + struct MMUNode *p, **pp; + p = g_mmu->region[e].simple_map_head->next; + pp = &p; + + while(*pp) { + if((*pp)->buf_virtual_address == buf_virtual_address && + (*pp)->process_id == process_id) { + SMDeleteNode(pp); + break; + } + *pp = (*pp)->next; + } + + return MMU_STATUS_OK; +} + +static enum MMUStatus RemoveNode(enum MMURegion e, + void *buf_virtual_address, + unsigned int process_id) { + struct MMUNode *p, **pp; + p = g_mmu->region[e].map_head->next; + pp = &p; + + while(*pp) { + if((*pp)->buf_virtual_address == buf_virtual_address && + (*pp)->process_id == process_id) { + InsertNode(e, pp, 1); + break; + } + *pp = (*pp)->next; + } + + return MMU_STATUS_OK; +} + +static enum MMUStatus SMRemoveKernelNode(enum MMURegion e, + unsigned int buf_bus_address, + unsigned int process_id) { + struct MMUNode *p, **pp; + p = g_mmu->region[e].simple_map_head->next; + pp = &p; + + while(*pp) { + 
if((*pp)->buf_bus_address == buf_bus_address && + (*pp)->process_id == process_id) { + SMDeleteNode(pp); + break; + } + *pp = (*pp)->next; + } + + return MMU_STATUS_OK; +} + +static enum MMUStatus RemoveKernelNode(enum MMURegion e, + unsigned int buf_bus_address, + unsigned int process_id) { + struct MMUNode *p, **pp; + p = g_mmu->region[e].map_head->next; + pp = &p; + + while(*pp) { + if((*pp)->buf_bus_address == buf_bus_address && + (*pp)->process_id == process_id) { + InsertNode(e, pp, 1); + break; + } + *pp = (*pp)->next; + } + + return MMU_STATUS_OK; +} + +static enum MMUStatus Delay(unsigned int delay) { + if(delay > 0) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + ktime_t dl = ktime_set((delay / MSEC_PER_SEC), + (delay % MSEC_PER_SEC) * NSEC_PER_MSEC); + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&dl, HRTIMER_MODE_REL); +#else + msleep(delay); +#endif + } + + return MMU_STATUS_OK; +} + +static enum MMUStatus CreateMutex(void **mtx) { + enum MMUStatus status; + + /* Allocate the mutex structure. */ + status = AllocateMemory(sizeof(struct mutex), mtx); + if (MMU_IS_SUCCESS(status)) { + /* Initialize the mutex. */ + mutex_init(*(struct mutex **)mtx); + } + + return status; +} + +static enum MMUStatus DeleteMutex(void *mtx) { + /* Destroy the mutex. */ + mutex_destroy((struct mutex *)mtx); + + /* Free the mutex structure. */ + FreeMemory(mtx); + + return MMU_STATUS_OK; +} + +static enum MMUStatus AcquireMutex(void *mtx, unsigned int timeout) { + if (timeout == MMU_INFINITE) + { + /* Lock the mutex. */ + mutex_lock(mtx); + + /* Success. */ + return MMU_STATUS_OK; + } + + for (;;) { + /* Try to acquire the mutex. */ + if (mutex_trylock(mtx)) { + /* Success. */ + return MMU_STATUS_OK; + } + + if (timeout-- == 0) { + break; + } + + /* Wait for 1 millisecond. */ + Delay(1); + } + + return MMU_STATUS_OK; +} + +static enum MMUStatus ReleaseMutex(void *mtx) { + /* Release the mutex. 
*/ + mutex_unlock(mtx); + + return MMU_STATUS_OK; +} + + +static inline enum MMUStatus QueryProcessPageTable(void *logical, + unsigned long long *address) { + unsigned long lg = (unsigned long)logical; + unsigned long offset = lg & ~PAGE_MASK; + struct vm_area_struct *vma; + spinlock_t *ptl; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (is_vmalloc_addr(logical)) { + /* vmalloc area. */ + *address = page_to_phys(vmalloc_to_page(logical)) | offset; + return MMU_STATUS_OK; + } else if (virt_addr_valid(lg)) { + /* Kernel logical address. */ + *address = virt_to_phys(logical); + return MMU_STATUS_OK; + } else { + /* Try user VM area. */ + if (!current->mm) + return MMU_STATUS_NOT_FOUND; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + down_read(&current->mm->mmap_lock); +#else + down_read(&current->mm->mmap_sem); +#endif + vma = find_vma(current->mm, lg); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + up_read(&current->mm->mmap_lock); +#else + up_read(&current->mm->mmap_sem); +#endif + + /* To check if mapped to user.
*/ + if (!vma) + return MMU_STATUS_NOT_FOUND; + + pgd = pgd_offset(current->mm, lg); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + return MMU_STATUS_NOT_FOUND; + +#if (defined(CONFIG_CPU_CSKYV2) || defined(CONFIG_X86)) \ + && LINUX_VERSION_CODE >= KERNEL_VERSION (4,12,0) + pud = pud_offset((p4d_t*)pgd, lg); +#elif (defined(CONFIG_CPU_CSKYV2)) \ + && LINUX_VERSION_CODE >= KERNEL_VERSION (4,11,0) + pud = pud_offset((p4d_t*)pgd, lg); +#else + pud = pud_offset((p4d_t*)pgd, lg); +#endif + if (pud_none(*pud) || pud_bad(*pud)) + return MMU_STATUS_NOT_FOUND; + + pmd = pmd_offset(pud, lg); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + return MMU_STATUS_NOT_FOUND; + + pte = pte_offset_map_lock(current->mm, pmd, lg, &ptl); + if (!pte) { + spin_unlock(ptl); + return MMU_STATUS_NOT_FOUND; + } + + if (!pte_present(*pte)) { + pte_unmap_unlock(pte, ptl); + return MMU_STATUS_NOT_FOUND; + } + + *address = (pte_pfn(*pte) << PAGE_SHIFT) | offset; + pte_unmap_unlock(pte, ptl); + + *address -= gBaseDDRHw; + //MMUDEBUG(" QueryProcessPageTable map: virt %p -> %p\n", logical, (void *)*address); + + return MMU_STATUS_OK; + } +} + +static inline int GetProcessID(void) { + return current->tgid; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) +static inline int is_vmalloc_addr(void *addr) { + unsigned long long addr = (unsigned long long)Addr; + + return addr >= VMALLOC_START && addr < VMALLOC_END; +} +#endif + +static enum MMUStatus GetPhysicalAddress(void *logical, + unsigned long long *address) { + enum MMUStatus status; + + status = QueryProcessPageTable(logical, address); + + return status; +} + +static enum MMUStatus GetPageEntry(struct MMUNode *node, + unsigned int **page_table_entry, + unsigned int i) { + int num = node->mtlb_start * MMU_STLB_4K_ENTRY_NUM + + node->stlb_start + i; + *page_table_entry = (unsigned int*)g_mmu->stlb_virtual + num; + return MMU_STATUS_OK; +} + +static enum MMUStatus SetupDynamicSpace(void) { + int i; + enum MMUStatus status; + unsigned int stlb_entry; + void 
*pointer; + unsigned long long address; + dma_addr_t dma_handle; + unsigned int num_entries = MMU_MTLB_ENTRY_NUM; + unsigned int *mtlb_virtual = (unsigned int *)g_mmu->mtlb_virtual; + + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + if(pcie) { + pointer = ioremap(gBaseDDRHw+STLB_PCIE_START_ADDRESS, num_entries*MMU_STLB_4K_SIZE); + g_mmu->stlb_virtual = pointer; + MMUDEBUG(" *****stlb_virtual = %p**%d\n", pointer, num_entries*MMU_STLB_4K_SIZE); + address = STLB_PCIE_START_ADDRESS; + for(i = 0; i < num_entries; i++){ + stlb_entry = address + /* 4KB page size */ + | (0 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + WritePageEntry(mtlb_virtual++, stlb_entry); + address += MMU_STLB_4K_SIZE; + } + + } else { + g_mmu->stlb_virtual = (void *)((u64)(g_mmu->mtlb_virtual) + MMU_MTLB_SIZE); + g_mmu->stlb_physical = address = g_mmu->mtlb_physical + MMU_MTLB_SIZE; + g_mmu->stlb_size = num_entries * MMU_STLB_4K_SIZE; + + for(i = 0; i < num_entries; i++){ + stlb_entry = address + /* 4KB page size */ + | (0 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + WritePageEntry(mtlb_virtual++, stlb_entry); + address += MMU_STLB_4K_SIZE; + } + } + ReleaseMutex(g_mmu->page_table_mutex); + + /* Initial map info. */ + if (simple_map) + SMCreateNodes(); + else + CreateNode(); + + return MMU_STATUS_OK; +onerror: + /* Return status. 
*/ + return status; +} + + +enum MMUStatus MMUInit(volatile unsigned char *hwregs) { + enum MMUStatus status; + unsigned i; + int result; + void *pointer; + + if (mmu_init == MMU_TRUE) { + /* All mmu use common table and dev, just initial once*/ + pr_notice(" *****MMU Already Initialed*****\n"); + return MMU_STATUS_OK; + } + + if(!hwregs || (ioread32((void*)(hwregs + MMU_REG_HW_ID))>>16) != 0x4D4D) + return MMU_STATUS_NOT_FOUND; + + pr_notice(" *****MMU Init*****\n"); + + platformdev = platform_device_register_full(&hantro_platform_info); + if(platformdev == NULL) { + pr_err("hantrodec create platform device fail\n"); + status = MMU_STATUS_FALSE; + goto onerror; + } else { + #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + platformdev->dev.dma_coherent = 0; //For this device dma alloc coherent mem + #endif + pr_info("Create platform device success\n"); + } + + result = platform_driver_register(&hantro_drm_platform_driver); + pr_notice("Platform driver status is %d\n", result); + + /* Allocate memory for the MMU object. */ + MMU_ON_ERROR(AllocateMemory(sizeof(struct MMU), &pointer)); + ZeroMemory(pointer, sizeof(struct MMU)); + + g_mmu = pointer; + + g_mmu->page_table_mutex = NULL; + + /* Create the page table mutex. 
*/ + MMU_ON_ERROR(CreateMutex(&g_mmu->page_table_mutex)); + + for (i = 0; i < MMU_REGION_COUNT;i++) { + MMU_ON_ERROR(CreateMutex(&g_mmu->region[i].node_mutex)); + } + + mmu_init = MMU_TRUE; + return MMU_STATUS_OK; + +onerror: + pr_notice(" *****MMU Init Error*****\n"); + return status; +} + +enum MMUStatus MMURelease(void *filp, volatile unsigned char *hwregs) { + int i, j; + struct MMUNode *p, *tmp; + unsigned long long address; + unsigned int *page_table_entry; + + if(!hwregs) + return MMU_STATUS_FALSE; + + /* if mmu or TLB not enabled, return */ + if (simple_map) { + if(g_mmu == NULL || g_mmu->region[0].simple_map_head == NULL) + return MMU_STATUS_OK; + } else { + if(g_mmu == NULL || g_mmu->region[0].map_head == NULL) + return MMU_STATUS_OK; + } + + pr_debug(" *****MMU Release*****\n"); + + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + + if (simple_map) { + for (i = 0; i < MMU_REGION_COUNT; i++) { + p = g_mmu->region[i].simple_map_head->next; + + while(p) { + tmp = p->next; + if(p->filp == (struct file *)filp) { + + for(j = 0;j < p->page_count; j++) { + GetPageEntry(p, &page_table_entry, j); + address = 0; + WritePageEntry(page_table_entry, address); + } + + SMRemoveNode(i, p->buf_virtual_address, p->process_id); + } + p = tmp; + } + } + } else { + for (i = 0; i < MMU_REGION_COUNT; i++) { + p = g_mmu->region[i].map_head->next; + + while(p) { + tmp = p->next; + if(p->filp == (struct file *)filp) { + + for(j = 0;j < p->page_count; j++) { + GetPageEntry(p, &page_table_entry, j); + address = 0; + WritePageEntry(page_table_entry, address); + } + + RemoveNode(i, p->buf_virtual_address, p->process_id); + } + p = tmp; + } + } + } + ReleaseMutex(g_mmu->page_table_mutex); + + return MMU_STATUS_OK; +} + +enum MMUStatus MMUCleanup(volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]) { + int i; + struct MMUNode *p, *tmp; + struct MMUNode *fp; + + for (i = 0; i < MAX_SUBSYS_NUM; i++) { + if (hwregs[i][0] != NULL && + (ioread32((void*)(hwregs[i][0] + 
MMU_REG_HW_ID))>>16) != 0x4D4D) + return MMU_STATUS_FALSE; + if (hwregs[i][1] != NULL && + (ioread32((void*)(hwregs[i][1] + MMU_REG_HW_ID))>>16) != 0x4D4D) + return MMU_STATUS_FALSE; + } + + pr_info(" *****MMU cleanup*****\n"); + if (pcie) { + if (g_mmu->stlb_virtual) + iounmap(g_mmu->stlb_virtual); + if (g_mmu->mtlb_virtual) + iounmap(g_mmu->mtlb_virtual); + if (g_mmu->page_table_array) + iounmap(g_mmu->page_table_array); + } else { + /* stlb_virtual is same alloc on alloc mtlb_virtual in func MMUEnable() + * so, should not free g_mmu->stlb_virtual.But free handle g_mmu->mtlb_physical + * size should be ( g_mmu->mtlb_size+g_mmu->stlb_size) + * */ + if (g_mmu->mtlb_virtual) + dma_free_coherent(&platformdev->dev, g_mmu->mtlb_size+g_mmu->stlb_size, + g_mmu->mtlb_virtual, (dma_addr_t)g_mmu->mtlb_physical); + if (g_mmu->page_table_array) + dma_free_coherent(&platformdev->dev, g_mmu->page_table_array_size, + g_mmu->page_table_array, (dma_addr_t)g_mmu->page_table_array_physical); + } + DeleteMutex(g_mmu->page_table_mutex); + + for (i = 0; i < MMU_REGION_COUNT; i++) { + DeleteMutex(g_mmu->region[i].node_mutex); + if (simple_map) { + p = g_mmu->region[i].simple_map_head; + while(p) { + tmp = p->next; + FreeMemory(p); + p = tmp; + MMUDEBUG(" *****clean node*****\n"); + } + } else { + fp = g_mmu->region[i].free_map_head; + p = g_mmu->region[i].map_head; + while(fp) { + tmp = fp->next; + FreeMemory(fp); + fp = tmp; + MMUDEBUG(" *****clean free node*****\n"); + } + + while(p) { + tmp = p->next; + FreeMemory(p); + p = tmp; + MMUDEBUG(" *****clean node*****\n"); + } + } + } + FreeMemory(g_mmu); + + platform_device_unregister(platformdev); + platform_driver_unregister(&hantro_drm_platform_driver); + pr_info("Unregister platform device.\n"); + + for (i = 0; i < MAX_SUBSYS_NUM; i++) { + if (hwregs[i][0] != NULL) + iowrite32(0, (void*)(hwregs[i][0] + MMU_REG_CONTROL)); + if (hwregs[i][1] != NULL) + iowrite32(0, (void*)(hwregs[i][1] + MMU_REG_CONTROL)); + } + mmu_enable = 0; + 
mmu_init = 0; + + return MMU_STATUS_OK; +} + +/*------------------------------------------------------------------------------ + Function name: MMUEnable + Description: + Create TLB, set registers and enable MMU + + For pcie, TLB buffers come from FPGA memory and The distribution is as follows + MTLB: start from: 0x00100000, size: 4K bits + page table array: 0x00200000 64 bits + STLB: 0x00300000 4M bits + ------------------------------------------------------------------------------*/ +enum MMUStatus MMUEnable(volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]) { + enum MMUStatus status; + unsigned int address; + unsigned int mutex = MMU_FALSE; + dma_addr_t dma_handle; + u32 i = 0; + u32 address_ext; + u32 total_table_size; + + if(mmu_enable == MMU_TRUE) { + pr_info(" *****MMU Already Enabled*****\n"); + return MMU_STATUS_OK; + } + + pr_info(" *****MMU Enable...*****\n"); + + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + mutex = MMU_TRUE; + if(pcie) { + g_mmu->mtlb_size = MMU_MTLB_SIZE; + g_mmu->mtlb_virtual = ioremap(gBaseDDRHw+MTLB_PCIE_START_ADDRESS, g_mmu->mtlb_size); + MMUDEBUG("gBaseDDRHw=0x%llx, g_mmu->mtlb_virtual=0x%llx\n", gBaseDDRHw, g_mmu->mtlb_virtual); + g_mmu->mtlb_physical = MTLB_PCIE_START_ADDRESS; + + g_mmu->page_table_array = ioremap(gBaseDDRHw+PAGE_PCIE_START_ADDRESS, PAGE_TABLE_ENTRY_SIZE); + } else { + /* Allocate the 4K mode MTLB table. 
*/ + total_table_size = MMU_MTLB_SIZE + MMU_MTLB_ENTRY_NUM*MMU_STLB_4K_SIZE; + g_mmu->mtlb_size = MMU_MTLB_SIZE; + g_mmu->mtlb_virtual = dma_alloc_coherent(&platformdev->dev, total_table_size, + &dma_handle, GFP_KERNEL | GFP_DMA); + MMUDEBUG(" *****g_mmu->mtlb_virtual = 0x%llx\n", g_mmu->mtlb_virtual); + g_mmu->mtlb_physical = (unsigned long long)dma_handle; + MMUDEBUG(" *****mtlb_physical = 0x%llx\n", (unsigned int)g_mmu->mtlb_physical); + if(g_mmu->mtlb_virtual == NULL) { + pr_err("hantrodec alloc buffer fail\n"); + status = MMU_STATUS_FALSE; + goto onerror; + } + + g_mmu->page_table_array_size = PAGE_TABLE_ENTRY_SIZE; + g_mmu->page_table_array = dma_alloc_coherent(&platformdev->dev, g_mmu->page_table_array_size, + &dma_handle, GFP_KERNEL | GFP_DMA); + MMUDEBUG(" *****g_mmu->page_table_array = 0x%llx\n", g_mmu->page_table_array); + g_mmu->page_table_array_physical = (unsigned long long)dma_handle; + MMUDEBUG(" *****page_table_array_physical = 0x%llx\n", (unsigned int)g_mmu->page_table_array_physical); + if(g_mmu->page_table_array == NULL) { + pr_err("hantrodec alloc buffer fail\n"); + status = MMU_STATUS_FALSE; + goto onerror; + } + } + + *((unsigned int*)g_mmu->page_table_array) = + (g_mmu->mtlb_physical & 0xFFFFFC00) | (0 << 0); + *((unsigned int *)g_mmu->page_table_array+1) = + (u32)(g_mmu->mtlb_physical >> 32)&0xff; + *((unsigned int *)g_mmu->page_table_array+2) = + (g_mmu->mtlb_physical & 0xFFFFFC00) | (0 << 0); + *((unsigned int *)g_mmu->page_table_array+3) = + (u32)(g_mmu->mtlb_physical >> 32)&0xff; + + MMUDEBUG(" Page table array[0]: lsb = 0x%08x\n", ((int *)g_mmu->page_table_array)[0]); + MMUDEBUG(" msb = 0x%08x\n", ((int *)g_mmu->page_table_array)[1]); + + ZeroMemory(g_mmu->mtlb_virtual, total_table_size); + + ReleaseMutex(g_mmu->page_table_mutex); + + MMU_ON_ERROR(SetupDynamicSpace()); + + if(pcie) { + address = PAGE_PCIE_START_ADDRESS; + } else { + address = g_mmu->page_table_array_physical; + address_ext = ((u32)(g_mmu->page_table_array_physical >> 
32))&0xff; + } + +#ifndef HANTROVCMD_ENABLE_IP_SUPPORT + encoder_dev_clk_lock(); + /* set regs of all MMUs */ + for (i = 0; i < MAX_SUBSYS_NUM; i++) { + if (hwregs[i][0] != NULL) { + MMUDEBUG("hwregs[%d][0]=%p, id=0x%08x", i, hwregs[i][0], ioread32((void*)hwregs[i][0] + MMU_REG_HW_ID)); + iowrite32(address, (void*)(hwregs[i][0] + MMU_REG_ADDRESS)); + iowrite32(address_ext, (void *)(hwregs[i][0] + MMU_REG_ADDRESS_MSB)); + + iowrite32(0x10000, (void*)(hwregs[i][0] + MMU_REG_PAGE_TABLE_ID)); + iowrite32(0x00000, (void*)(hwregs[i][0] + MMU_REG_PAGE_TABLE_ID)); + + iowrite32(1, (void*)(hwregs[i][0] + MMU_REG_CONTROL)); + } + if (hwregs[i][1] != NULL) { + MMUDEBUG("hwregs[%d][1]=%p, id=0x%08x", i, hwregs[i][1], ioread32((void*)hwregs[i][1] + MMU_REG_HW_ID)); + iowrite32(address, (void*)(hwregs[i][1] + MMU_REG_ADDRESS)); + iowrite32(address_ext, (void *)(hwregs[i][1] + MMU_REG_ADDRESS_MSB)); + + iowrite32(0x10000, (void*)(hwregs[i][1] + MMU_REG_PAGE_TABLE_ID)); + iowrite32(0x00000, (void*)(hwregs[i][1] + MMU_REG_PAGE_TABLE_ID)); + + iowrite32(1, (void*)(hwregs[i][1] + MMU_REG_CONTROL)); + } + } + encoder_dev_clk_unlock(); +#endif + mmu_enable = MMU_TRUE; + return MMU_STATUS_OK; + +onerror: + if (mutex) { + ReleaseMutex(g_mmu->page_table_mutex); + } + MMUDEBUG(" *****MMU Enable Error*****\n"); + return status; +} + +/*------------------------------------------------------------------------------ + Function name: MMUFlush + Description: + Flush MMU reg to update cache in MMU. 
+ ------------------------------------------------------------------------------*/ +static enum MMUStatus MMUFlush(u32 core_id, volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]) { + enum MMUStatus status; + unsigned int mutex = MMU_FALSE; + + MMUDEBUG(" *****MMU Flush*****\n"); + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + mutex = MMU_TRUE; + + encoder_dev_clk_lock(); + if (hwregs[core_id][0] != NULL) { + iowrite32(0x10, (void*)(hwregs[core_id][0] + MMU_REG_FLUSH)); + iowrite32(0x00, (void*)(hwregs[core_id][0] + MMU_REG_FLUSH)); + } else { + pr_err("hantrodec alloc buffer fail\n"); + status = MMU_STATUS_FALSE; + goto onerror; + } + if (hwregs[core_id][1] != NULL) { + iowrite32(0x10, (void*)(hwregs[core_id][1] + MMU_REG_FLUSH)); + iowrite32(0x00, (void*)(hwregs[core_id][1] + MMU_REG_FLUSH)); + } + encoder_dev_clk_unlock(); + ReleaseMutex(g_mmu->page_table_mutex); + return MMU_STATUS_OK; + +onerror: + if (mutex) { + encoder_dev_clk_unlock(); + ReleaseMutex(g_mmu->page_table_mutex); + } + MMUDEBUG(" *****MMU Flush Error*****\n"); + return status; +} + +static enum MMUStatus MMUMemNodeMap(struct addr_desc *addr, struct file *filp) { + enum MMUStatus status; + unsigned int page_count = 0; + unsigned int i = 0; + struct MMUNode *p; + unsigned long long address = 0x0; + unsigned int *page_table_entry; + enum MMURegion e; + unsigned int mutex = MMU_FALSE; + u32 ext_addr; + + MMUDEBUG(" *****MMU Map*****\n"); + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + mutex = MMU_TRUE; + + page_count = (addr->size - 1)/PAGE_SIZE + 1; + + GetPhysicalAddress(addr->virtual_address, &address); + MMUDEBUG(" *****MMU map address*****%x\n", address); + if(address >= REGION_IN_START && + address + addr->size < REGION_IN_END) + e = MMU_REGION_IN; + else if(address >= REGION_OUT_START && + address + addr->size < REGION_OUT_END) + e = MMU_REGION_OUT; + else if(address >= REGION_PRIVATE_START && + address + addr->size < REGION_PRIVATE_END) + e = MMU_REGION_PRIVATE; + else + e 
= MMU_REGION_PUB; + + if (simple_map) { + MMU_ON_ERROR(SMCheckAddress(e, addr->virtual_address)); + + SMCreateNode(e, &p, page_count); + MMUDEBUG(" *****Node map size*****%d\n", page_count); + + p->buf_virtual_address = addr->virtual_address; + p->process_id = GetProcessID(); + p->filp = filp; + + p->mtlb_start = ((address + map_shift) >> MMU_MTLB_SHIFT); + p->stlb_start = ((address + map_shift) >> MMU_STLB_4K_SHIFT ) & 0x3FF; + p->mtlb_end = (page_count + p->stlb_start) / MMU_STLB_4K_ENTRY_NUM + + p->mtlb_start; + p->stlb_end = (page_count + p->stlb_start) % MMU_STLB_4K_ENTRY_NUM; + p->page_count = page_count; + + for(i = 0;i < page_count; i++) { + GetPhysicalAddress(addr->virtual_address + i * PAGE_SIZE, &address); + GetPageEntry(p, &page_table_entry, i); + ext_addr = ((u32)(address>>32))&0xff; + address = (address & 0xFFFFF000) + /* ext address , physical address bits [39,32]*/ + | (ext_addr << 4) + /* writable */ + | (1 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + WritePageEntry(page_table_entry, address); + } + + /* Purpose of Bare_metal mode: input bus address==mmu address*/ + addr->bus_address = p->mtlb_start << MMU_MTLB_SHIFT + | p->stlb_start << MMU_STLB_4K_SHIFT; + } else { + MMU_ON_ERROR(FindFreeNode(e, &p, page_count)); + + SplitFreeNode(e, &p, page_count); + MMUDEBUG(" *****Node map size*****%d\n", p->page_count); + + p->buf_virtual_address = addr->virtual_address; + p->process_id = GetProcessID(); + p->filp = filp; + + for(i = 0;i < page_count; i++) { + GetPhysicalAddress(addr->virtual_address + i * PAGE_SIZE, &address); + GetPageEntry(p, &page_table_entry, i); + ext_addr = ((u32)(address>>32))&0xff; + address = (address & 0xFFFFF000) + /* ext address , physical address bits [39,32]*/ + | (ext_addr << 4) + /* writable */ + | (1 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + WritePageEntry(page_table_entry, address); + } + addr->bus_address = p->mtlb_start << MMU_MTLB_SHIFT + | p->stlb_start 
<< MMU_STLB_4K_SHIFT; + } + MMUDEBUG(" MMU_MTLB_SHIFT %d MMU_STLB_4K_SHIFT %d\n", MMU_MTLB_SHIFT, MMU_STLB_4K_SHIFT); + MMUDEBUG(" MMUMemNodeMap map total %d pages in region %d\nMTLB/STLB starts %d/%d, MTLB/STLB ends %d/%d\n", + page_count, (u32)e, p->mtlb_start, p->stlb_start, p->mtlb_end, p->stlb_end); + MMUDEBUG(" MMUMemNodeMap map %p -> 0x%08x\n", addr->virtual_address, addr->bus_address); + + ReleaseMutex(g_mmu->page_table_mutex); + + return MMU_STATUS_OK; + +onerror: + if (mutex) { + ReleaseMutex(g_mmu->page_table_mutex); + } + MMUDEBUG(" *****MMU Map Error*****\n"); + return status; +} + +static enum MMUStatus MMUMemNodeUnmap(struct addr_desc *addr) { + unsigned int i; + unsigned long long address = 0x0; + unsigned int *page_table_entry; + int process_id = GetProcessID(); + enum MMURegion e = MMU_REGION_COUNT; + enum MMUStatus status = MMU_STATUS_OUT_OF_MEMORY; + struct MMUNode *p; + unsigned int mutex = MMU_FALSE; + + MMUDEBUG(" *****MMU Unmap*****\n"); + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + mutex = MMU_TRUE; + + GetPhysicalAddress(addr->virtual_address, &address); + if(address >= REGION_IN_START && + address < REGION_IN_END) + e = MMU_REGION_IN; + else if(address >= REGION_OUT_START && + address < REGION_OUT_END) + e = MMU_REGION_OUT; + else if(address >= REGION_PRIVATE_START && + address < REGION_PRIVATE_END) + e = MMU_REGION_PRIVATE; + else + e = MMU_REGION_PUB; + + if (simple_map) + p = g_mmu->region[e].simple_map_head->next; + else + p = g_mmu->region[e].map_head->next; + /* Reset STLB of the node */ + while(p) { + if(p->buf_virtual_address == addr->virtual_address && + p->process_id == process_id) { + for(i = 0;i < p->page_count; i++) { + GetPageEntry(p, &page_table_entry, i); + address = 0; + WritePageEntry(page_table_entry, address); + } + break; + } + p = p->next; + } + if(!p) + goto onerror; + + if (simple_map) + SMRemoveNode(e, addr->virtual_address, process_id); + else + RemoveNode(e, addr->virtual_address, process_id); + + 
ReleaseMutex(g_mmu->page_table_mutex); + return MMU_STATUS_OK; + +onerror: + if (mutex) { + ReleaseMutex(g_mmu->page_table_mutex); + } + MMUDEBUG(" *****MMU Unmap Error*****\n"); + return status; +} + +enum MMUStatus MMUKernelMemNodeMap(struct kernel_addr_desc *addr) { + enum MMUStatus status; + unsigned int page_count = 0; + unsigned int i = 0; + struct MMUNode *p; + unsigned long long address = 0x0; + unsigned int *page_table_entry; + enum MMURegion e; + unsigned int mutex = MMU_FALSE; + u32 ext_addr; + u32 page_entry_value = 0; + + MMUDEBUG(" *****MMU Map*****\n"); + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + mutex = MMU_TRUE; + + page_count = (addr->size - 1)/PAGE_SIZE + 1; + + address = addr->bus_address; + MMUDEBUG(" *****MMU map address*****%x\n", address); + if(address >= REGION_IN_START && + address + addr->size < REGION_IN_END) + e = MMU_REGION_IN; + else if(address >= REGION_OUT_START && + address + addr->size < REGION_OUT_END) + e = MMU_REGION_OUT; + else if(address >= REGION_PRIVATE_START && + address + addr->size < REGION_PRIVATE_END) + e = MMU_REGION_PRIVATE; + else + e = MMU_REGION_PUB; + + if (simple_map) { + //TODO: should check bus addr + //MMU_ON_ERROR(SMCheckAddress(e, addr->virtual_address)); + + SMCreateNode(e, &p, page_count); + MMUDEBUG(" *****Node map size*****%d\n", page_count); + + p->buf_bus_address = addr->bus_address; + p->process_id = GetProcessID(); + p->filp = NULL; + + p->mtlb_start = ((address + map_shift) >> MMU_MTLB_SHIFT); + p->stlb_start = ((address + map_shift) >> MMU_STLB_4K_SHIFT ) & 0x3FF; + p->mtlb_end = (page_count + p->stlb_start) / MMU_STLB_4K_ENTRY_NUM + + p->mtlb_start; + p->stlb_end = (page_count + p->stlb_start) % MMU_STLB_4K_ENTRY_NUM; + p->page_count = page_count; + + for(i = 0;i < page_count; i++) { + /* this function used in kernel only, so we think it's a contunuous buffer*/ + address += (i ? 
PAGE_SIZE : 0); + GetPageEntry(p, &page_table_entry, i); + ext_addr = ((u32)(address>>32))&0xff; + page_entry_value = (address & 0xFFFFF000) + /* ext address , physical address bits [39,32]*/ + | (ext_addr << 4) + /* writable */ + | (1 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + WritePageEntry(page_table_entry, page_entry_value); + } + + /* Purpose of Bare_metal mode: input bus address==mmu address*/ + addr->mmu_bus_address = p->mtlb_start << MMU_MTLB_SHIFT + | p->stlb_start << MMU_STLB_4K_SHIFT; + } else { + MMU_ON_ERROR(FindFreeNode(e, &p, page_count)); + + SplitFreeNode(e, &p, page_count); + MMUDEBUG(" *****Node map size*****%d\n", p->page_count); + + p->buf_bus_address = addr->bus_address; + p->process_id = GetProcessID(); + p->filp = NULL; + + for(i = 0;i < page_count; i++) { + /* this function used in kernel only, so we think it's a contunuous buffer*/ + address += (i ? PAGE_SIZE : 0); + GetPageEntry(p, &page_table_entry, i); + ext_addr = ((u32)(address>>32))&0xff; + page_entry_value = (address & 0xFFFFF000) + /* ext address , physical address bits [39,32]*/ + | (ext_addr << 4) + /* writable */ + | (1 << 2) + /* Ignore exception */ + | (0 << 1) + /* Present */ + | (1 << 0); + WritePageEntry(page_table_entry, page_entry_value); + } + addr->mmu_bus_address = p->mtlb_start << MMU_MTLB_SHIFT + | p->stlb_start << MMU_STLB_4K_SHIFT; + } + MMUDEBUG(" KERNEL MMU_MTLB_SHIFT %d MMU_STLB_4K_SHIFT %d\n", MMU_MTLB_SHIFT, MMU_STLB_4K_SHIFT); + MMUDEBUG(" MMUKernelMemNodeMap map total %d pages in region %d\nMTLB/STLB starts %d/%d, MTLB/STLB ends %d/%d\n", + page_count, (u32)e, p->mtlb_start, p->stlb_start, p->mtlb_end, p->stlb_end); + MMUDEBUG(" MMUKernelMemNodeMap map 0x%llx -> 0x%08x\n", addr->bus_address, addr->mmu_bus_address); + + ReleaseMutex(g_mmu->page_table_mutex); + + return MMU_STATUS_OK; + +onerror: + if (mutex) { + ReleaseMutex(g_mmu->page_table_mutex); + } + MMUDEBUG(" *****MMU Map Error*****\n"); + return status; +} + +enum 
MMUStatus MMUKernelMemNodeUnmap(struct kernel_addr_desc *addr) { + unsigned int i; + unsigned long long address = 0x0; + unsigned int *page_table_entry; + int process_id = GetProcessID(); + enum MMURegion e = MMU_REGION_COUNT; + enum MMUStatus status = MMU_STATUS_OUT_OF_MEMORY; + struct MMUNode *p; + unsigned int mutex = MMU_FALSE; + + MMUDEBUG(" *****MMU Unmap*****\n"); + AcquireMutex(g_mmu->page_table_mutex, MMU_INFINITE); + mutex = MMU_TRUE; + + address = addr->bus_address; + if(address >= REGION_IN_START && + address < REGION_IN_END) + e = MMU_REGION_IN; + else if(address >= REGION_OUT_START && + address < REGION_OUT_END) + e = MMU_REGION_OUT; + else if(address >= REGION_PRIVATE_START && + address < REGION_PRIVATE_END) + e = MMU_REGION_PRIVATE; + else + e = MMU_REGION_PUB; + + if (simple_map) + p = g_mmu->region[e].simple_map_head->next; + else + p = g_mmu->region[e].map_head->next; + /* Reset STLB of the node */ + while(p) { + if(p->buf_bus_address == addr->bus_address && + p->process_id == process_id) { + for(i = 0;i < p->page_count; i++) { + GetPageEntry(p, &page_table_entry, i); + address = 0; + WritePageEntry(page_table_entry, address); + } + break; + } + p = p->next; + } + if(!p) + goto onerror; + + if (simple_map) + SMRemoveKernelNode(e, addr->bus_address, process_id); + else + RemoveKernelNode(e, addr->bus_address, process_id); + + ReleaseMutex(g_mmu->page_table_mutex); + return MMU_STATUS_OK; + +onerror: + if (mutex) { + ReleaseMutex(g_mmu->page_table_mutex); + } + MMUDEBUG(" *****MMU Unmap Error*****\n"); + return status; +} + + +static long MMUCtlBufferMap(struct file *filp, unsigned long arg) { + struct addr_desc addr; + long tmp; + + tmp = copy_from_user(&addr, (void*)arg, sizeof(struct addr_desc)); + if (tmp) { + MMUDEBUG("copy_from_user failed, returned %li\n", tmp); + return -MMU_EFAULT; + } + + MMUMemNodeMap(&addr, filp); + + tmp = copy_to_user((void*) arg, &addr, sizeof(struct addr_desc)); + if (tmp) { + MMUDEBUG("copy_to_user failed, returned 
%li\n", tmp); + return -MMU_EFAULT; + } + return 0; +} + +static long MMUCtlBufferUnmap(unsigned long arg) { + struct addr_desc addr; + long tmp; + + tmp = copy_from_user(&addr, (void*)arg, sizeof(struct addr_desc)); + if (tmp) { + MMUDEBUG("copy_from_user failed, returned %li\n", tmp); + return -MMU_EFAULT; + } + + MMUMemNodeUnmap(&addr); + return 0; +} + +static long MMUCtlEnable(unsigned long arg, volatile unsigned char *hwregs[HXDEC_MAX_CORES][2]) { + unsigned int enable; + long tmp; + + tmp = copy_from_user(&enable, (void*)arg, sizeof(unsigned int)); + if (tmp) { + MMUDEBUG("copy_from_user failed, returned %li\n", tmp); + return -MMU_EFAULT; + } + + MMUEnable(hwregs); + + return 0; +} + +static long MMUCtlFlush(unsigned long arg, volatile unsigned char *hwregs[HXDEC_MAX_CORES][2]) { + unsigned int core_id; + long tmp; + + tmp = copy_from_user(&core_id, (void*)arg, sizeof(unsigned int)); + if (tmp) { + MMUDEBUG("copy_from_user failed, returned %li\n", tmp); + return -MMU_EFAULT; + } + + MMUFlush(core_id, hwregs); + + return 0; +} + +long MMUIoctl(unsigned int cmd, void *filp, unsigned long arg, + volatile unsigned char *hwregs[HXDEC_MAX_CORES][2]) { + + u32 i = 0; + encoder_dev_clk_lock(); + for (i = 0; i < MAX_SUBSYS_NUM; i++) { + if (hwregs[i][0] != NULL && + (ioread32((void*)(hwregs[i][0] + MMU_REG_HW_ID))>>16) != 0x4D4D) + { + encoder_dev_clk_unlock(); + return -MMU_ENOTTY; + } + if (hwregs[i][1] != NULL && + (ioread32((void*)(hwregs[i][1] + MMU_REG_HW_ID))>>16) != 0x4D4D) + { + encoder_dev_clk_unlock(); + return -MMU_ENOTTY; + } + MMUDEBUG("mmu_hwregs[%d][0].mmu_hwregs[0]=%p", i, hwregs[i][0]); + MMUDEBUG("mmu_hwregs[%d][1].mmu_hwregs[0]=%p", i, hwregs[i][1]); + } + encoder_dev_clk_unlock(); + switch (cmd) { + case HANTRO_IOCS_MMU_MEM_MAP: { + return (MMUCtlBufferMap((struct file *)filp, arg)); + } + case HANTRO_IOCS_MMU_MEM_UNMAP: { + return (MMUCtlBufferUnmap(arg)); + } + case HANTRO_IOCS_MMU_ENABLE: { + return (MMUCtlEnable(arg, hwregs)); + } + case 
HANTRO_IOCS_MMU_FLUSH: { + return (MMUCtlFlush(arg, hwregs)); + } + default: + return -MMU_ENOTTY; + } +} + +unsigned long long GetMMUAddress(void) +{ + unsigned long long address = 0; + if(pcie) + address = PAGE_PCIE_START_ADDRESS; + else + address = g_mmu->page_table_array_physical; + + return address; +} + +void MMURestore(volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]) +{ + if (g_mmu == NULL) + return; + + int i; + unsigned int address; + u32 address_ext; + address = g_mmu->page_table_array_physical; + address_ext = ((u32)(g_mmu->page_table_array_physical >> 32))&0xff; + for (i = 0; i < MAX_SUBSYS_NUM; i++) { + if (hwregs[i][0] != NULL) { + iowrite32(address, (void*)(hwregs[i][0] + MMU_REG_ADDRESS)); + iowrite32(address_ext, (void *)(hwregs[i][0] + MMU_REG_ADDRESS_MSB)); + + iowrite32(0x10000, (void*)(hwregs[i][0] + MMU_REG_PAGE_TABLE_ID)); + iowrite32(0x00000, (void*)(hwregs[i][0] + MMU_REG_PAGE_TABLE_ID)); + + iowrite32(1, (void*)(hwregs[i][0] + MMU_REG_CONTROL)); + } + if (hwregs[i][1] != NULL) { + iowrite32(address, (void*)(hwregs[i][1] + MMU_REG_ADDRESS)); + iowrite32(address_ext, (void *)(hwregs[i][1] + MMU_REG_ADDRESS_MSB)); + + iowrite32(0x10000, (void*)(hwregs[i][1] + MMU_REG_PAGE_TABLE_ID)); + iowrite32(0x00000, (void*)(hwregs[i][1] + MMU_REG_PAGE_TABLE_ID)); + + iowrite32(1, (void*)(hwregs[i][1] + MMU_REG_CONTROL)); + } + } +} diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantrommu.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantrommu.h new file mode 100755 index 00000000000000..6c2184f80c101d --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantrommu.h @@ -0,0 +1,155 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), 
+* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. 
A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef _HANTROMMU_H_ +#define _HANTROMMU_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __FREERTOS__ +#elif defined(__linux__) +#include +#endif + +#define REGION_IN_START 0x0 +#define REGION_IN_END 0x0 +#define REGION_OUT_START 0x0 +#define REGION_OUT_END 0x0 +#define REGION_PRIVATE_START 0x0 +#define REGION_PRIVATE_END 0x0 + +#define REGION_IN_MMU_START 0x1000 +#define REGION_IN_MMU_END 0x40002000 +#define REGION_OUT_MMU_START 0x40002000 +#define REGION_OUT_MMU_END 0x40002000 +#define REGION_PRIVATE_MMU_START 0x40002000 +#define REGION_PRIVATE_MMU_END 0x40002000 + +#define MMU_REG_OFFSET 0 +#define MMU_REG_HW_ID (MMU_REG_OFFSET + 6*4) +#define MMU_REG_FLUSH (MMU_REG_OFFSET + 97*4) +#define MMU_REG_PAGE_TABLE_ID (MMU_REG_OFFSET + 107*4) +#define MMU_REG_CONTROL (MMU_REG_OFFSET + 226*4) +#define MMU_REG_ADDRESS (MMU_REG_OFFSET + 227*4) +#define MMU_REG_ADDRESS_MSB (MMU_REG_OFFSET + 228*4) + +#define MTLB_PCIE_START_ADDRESS 0x00100000 +#define PAGE_PCIE_START_ADDRESS 0x00200000 /* page_table_entry start address */ +#define STLB_PCIE_START_ADDRESS 0x00300000 +#define PAGE_TABLE_ENTRY_SIZE 64 + +enum MMUStatus { + MMU_STATUS_OK = 0, + + MMU_STATUS_FALSE = -1, + MMU_STATUS_INVALID_ARGUMENT = -2, + MMU_STATUS_INVALID_OBJECT = -3, + MMU_STATUS_OUT_OF_MEMORY = -4, + MMU_STATUS_NOT_FOUND = -19, +}; + +struct addr_desc { + void *virtual_address; /* buffer virtual address */ + unsigned int bus_address; /* buffer physical address */ + unsigned int size; /* physical size */ +}; + +struct kernel_addr_desc { + unsigned long long bus_address; /* buffer virtual address */ + unsigned int mmu_bus_address; /* buffer physical address in 
MMU*/ + unsigned int size; /* physical size */ +}; + + +#define HANTRO_IOC_MMU 'm' + +#define HANTRO_IOCS_MMU_MEM_MAP _IOWR(HANTRO_IOC_MMU, 1, struct addr_desc *) +#define HANTRO_IOCS_MMU_MEM_UNMAP _IOWR(HANTRO_IOC_MMU, 2, struct addr_desc *) +#define HANTRO_IOCS_MMU_ENABLE _IOWR(HANTRO_IOC_MMU, 3, unsigned int *) +#define HANTRO_IOCS_MMU_FLUSH _IOWR(HANTRO_IOC_MMU, 4, unsigned int *) +#define HANTRO_IOC_MMU_MAXNR 4 + +#define MAX_SUBSYS_NUM 4 /* up to 4 subsystem (temporary) */ +#define HXDEC_MAX_CORES MAX_SUBSYS_NUM /* used in hantro_dec.c */ + +/* Init MMU, should be called in driver init function. */ +enum MMUStatus MMUInit(volatile unsigned char *hwregs); +/* Clean up all data in MMU, should be called in driver cleanup function + when rmmod driver*/ +enum MMUStatus MMUCleanup(volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]); +/* The function should be called in driver realease function + when driver exit unnormally */ +enum MMUStatus MMURelease(void *filp, volatile unsigned char *hwregs); + +enum MMUStatus MMUEnable(volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]); + +/* Used in kernel to map buffer */ +enum MMUStatus MMUKernelMemNodeMap(struct kernel_addr_desc *addr); + +/* Used in kernel to unmap buffer */ +enum MMUStatus MMUKernelMemNodeUnmap(struct kernel_addr_desc *addr); + +unsigned long long GetMMUAddress(void); +long MMUIoctl(unsigned int cmd, void *filp, unsigned long arg, + volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]); + +void MMURestore(volatile unsigned char *hwregs[MAX_SUBSYS_NUM][2]); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.c new file mode 100644 index 00000000000000..90b33933b57978 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.c @@ -0,0 +1,98 @@ 
+/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0)) +#include +#else +#include +#endif +#include +#include + +#include "vc8000_axife.h" +/* mode description + * 1: OYB normal(enable) + * 2: bypass +*/ +u32 AXIFEEnable(volatile u8 *hwregs, u32 mode) { +#ifndef HANTROVCMD_ENABLE_IP_SUPPORT + if (!hwregs) return -1; + + //AXI FE pass through + if(mode == 1) + { + iowrite32(0x02, (void *)(hwregs+AXI_REG10_SW_FRONTEND_EN)); + iowrite32(0x00, (void *)(hwregs+AXI_REG11_SW_WORK_MODE)); + } + else if(mode == 2) + { + iowrite32(0x02, (void *)(hwregs+AXI_REG10_SW_FRONTEND_EN)); + iowrite32(0x40, (void *)(hwregs+AXI_REG11_SW_WORK_MODE)); + } + printk(KERN_INFO "AXIFEEnable: axife_reg10_addr=0x%p, *axife_reg10_addr=0x%08x\n", hwregs + 10*4, ioread32((void *)(hwregs + 10*4))); + printk(KERN_INFO "AXIFEEnable: axife_reg11_addr=0x%p, *axife_reg11_addr=0x%08x\n", hwregs + 11*4, ioread32((void *)(hwregs + 11*4))); +#endif + return 0; +} diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.h new file mode 100644 index 00000000000000..a430814197ec8d --- /dev/null +++ 
b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.h @@ -0,0 +1,77 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#ifndef __VC8000_AXIFE_H__ +#define __VC8000_AXIFE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __FREERTOS__ +#elif defined(__linux__) +#include +#include +#endif + +#define AXI_REG10_SW_FRONTEND_EN 10*4 //0x28 +#define AXI_REG11_SW_WORK_MODE 11*4 //0x2c + +u32 AXIFEEnable(volatile u8 *hwregs, u32 mode); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_devfreq.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_devfreq.h new file mode 100644 index 00000000000000..68aa3193a7de77 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_devfreq.h @@ -0,0 +1,54 @@ +#ifndef __DEC_DEVFREQ_H__ +#define __DEC_DEVFREQ_H__ + +#include +#include +struct devfreq; +struct opp_table; + +struct encoder_devfreq { + int busy_count; + struct devfreq *df; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct opp_table *clkname_opp_table; +#else + int token; +#endif + bool opp_of_table_added; + bool update_freq_flag; + unsigned long next_target_freq; + unsigned long cur_devfreq; + unsigned long max_freq; + wait_queue_head_t target_freq_wait_queue; +#ifdef CONFIG_TH1520_SYSTEM_MONITOR + struct monitor_dev_info *mdev_info; +#endif + + ktime_t 
busy_time; + ktime_t idle_time; + ktime_t time_last_update; + ktime_t based_maxfreq_busy_time; + ktime_t based_maxfreq_last_busy_t; + int busy_record_count; + + /* + * Protect busy_time, idle_time, time_last_update and busy_count + * because these can be updated concurrently, for example by the GP + * and PP interrupts. + */ + spinlock_t lock; + + struct mutex clk_mutex; /* clk freq changed lock,for vdec cannot changed clk rate in hw working*/ +}; +void encoder_devfreq_fini(struct device *dev); +int encoder_devfreq_init(struct device *dev) ; +void encoder_devfreq_record_busy(struct encoder_devfreq *devfreq); +void encoder_devfreq_record_idle(struct encoder_devfreq *devfreq); +struct encoder_devfreq * encoder_get_devfreq_priv_data(void); +int encoder_devfreq_resume(struct encoder_devfreq *devfreq); +int encoder_devfreq_suspend(struct encoder_devfreq *devfreq); +int encoder_devfreq_set_rate(struct device * dev); + +void encoder_dev_clk_lock(void); +void encoder_dev_clk_unlock(void); +#endif diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.c new file mode 100644 index 00000000000000..a3a14a6bfcd080 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.c @@ -0,0 +1,102 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission 
notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ +#include +#include +#include +#include +#include "vc8000_driver.h" + +static u32 vcmd_supported = 1; +int __init hantroenc_normal_init(void); +int __init hantroenc_vcmd_init(void); +void __exit hantroenc_normal_cleanup(void); +void __exit hantroenc_vcmd_cleanup(void); + +int __init hantroenc_init(void) +{ + if(vcmd_supported==0) + { + return hantroenc_normal_init(); + } + else + { + return hantroenc_vcmd_init(); + } +} + +void __exit hantroenc_cleanup(void) +{ + if(vcmd_supported==0) + { + hantroenc_normal_cleanup(); + } + else + { + hantroenc_vcmd_cleanup(); + } + return; +} + + +module_init(hantroenc_init); +module_exit(hantroenc_cleanup); +module_param(vcmd_supported,uint,0); + +/* module description */ +/*MODULE_LICENSE("Proprietary");*/ +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Verisilicon"); +MODULE_DESCRIPTION("VC8000 Vcmd driver"); + + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.h new file mode 100644 index 00000000000000..55055ac39ee749 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.h @@ -0,0 +1,349 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions 
of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + +#ifndef _VC8000_VCMD_DRIVER_H_ +#define _VC8000_VCMD_DRIVER_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __FREERTOS__ +/* needed for the _IOW etc stuff used later */ +#include "base_type.h" +#include "osal.h" +#include "dev_common_freertos.h" +#elif defined(__linux__) +#include /* needed for the _IOW etc stuff used later */ +#else //For other os +//TODO... +#endif +#ifdef HANTROMMU_SUPPORT +#include "hantrommu.h" +#endif + +#ifdef HANTROAXIFE_SUPPORT +#include "vc8000_axife.h" +#endif + +#ifdef __FREERTOS__ +//ptr_t has been defined in base_type.h //Now the FreeRTOS mem need to support 64bit env +#elif defined(__linux__) +#undef ptr_t +#define ptr_t PTR_T_KERNEL +typedef size_t ptr_t; +#endif +/* + * Macros to help debugging + */ + +#undef PDEBUG /* undef it, just in case */ +#ifdef HANTRO_DRIVER_DEBUG +# ifdef __KERNEL__ + /* This one if debugging is on, and kernel space */ +# define PDEBUG(fmt, args...) printk( KERN_INFO "vc8000: " fmt, ## args) +# else + /* This one for user space */ +# define PDEBUG(fmt, args...) printf(__FILE__ ":%d: " fmt, __LINE__ , ## args) +# endif +#else +# define PDEBUG(fmt, args...) 
pr_debug("vc8000: " fmt, ## args) +#endif + +#define ENC_HW_ID1 0x48320100 +#define ENC_HW_ID2 0x80006000 +#define CORE_INFO_MODE_OFFSET 31 +#define CORE_INFO_AMOUNT_OFFSET 28 + +/* Use 'k' as magic number */ +#define HANTRO_IOC_MAGIC 'k' + +/* + * S means "Set" through a ptr, + * T means "Tell" directly with the argument value + * G means "Get": reply by setting through a pointer + * Q means "Query": response is on the return value + * X means "eXchange": G and S atomically + * H means "sHift": T and Q atomically + */ + +#define HANTRO_IOCG_HWOFFSET _IOR(HANTRO_IOC_MAGIC, 3, unsigned long *) +#define HANTRO_IOCG_HWIOSIZE _IOR(HANTRO_IOC_MAGIC, 4, unsigned int *) +#define HANTRO_IOC_CLI _IO(HANTRO_IOC_MAGIC, 5) +#define HANTRO_IOC_STI _IO(HANTRO_IOC_MAGIC, 6) +#define HANTRO_IOCX_VIRT2BUS _IOWR(HANTRO_IOC_MAGIC, 7, unsigned long *) +#define HANTRO_IOCH_ARDRESET _IO(HANTRO_IOC_MAGIC, 8) /* debugging tool */ +#define HANTRO_IOCG_SRAMOFFSET _IOR(HANTRO_IOC_MAGIC, 9, unsigned long *) +#define HANTRO_IOCG_SRAMEIOSIZE _IOR(HANTRO_IOC_MAGIC, 10, unsigned int *) +#define HANTRO_IOCH_ENC_RESERVE _IOR(HANTRO_IOC_MAGIC, 11,unsigned int *) +#define HANTRO_IOCH_ENC_RELEASE _IOR(HANTRO_IOC_MAGIC, 12,unsigned int *) +#define HANTRO_IOCG_CORE_NUM _IOR(HANTRO_IOC_MAGIC, 13,unsigned int *) +#define HANTRO_IOCG_CORE_INFO _IOR(HANTRO_IOC_MAGIC, 14,SUBSYS_CORE_INFO *) +#define HANTRO_IOCG_CORE_WAIT _IOR(HANTRO_IOC_MAGIC, 15, unsigned int *) +#define HANTRO_IOCG_ANYCORE_WAIT _IOR(HANTRO_IOC_MAGIC, 16, CORE_WAIT_OUT *) + +#define HANTRO_IOCH_GET_CMDBUF_PARAMETER _IOWR(HANTRO_IOC_MAGIC, 25,struct cmdbuf_mem_parameter *) +#define HANTRO_IOCH_GET_CMDBUF_POOL_SIZE _IOWR(HANTRO_IOC_MAGIC, 26,unsigned long) +#define HANTRO_IOCH_SET_CMDBUF_POOL_BASE _IOWR(HANTRO_IOC_MAGIC, 27,unsigned long) +#define HANTRO_IOCH_GET_VCMD_PARAMETER _IOWR(HANTRO_IOC_MAGIC, 28, struct config_parameter *) +#define HANTRO_IOCH_RESERVE_CMDBUF _IOWR(HANTRO_IOC_MAGIC, 29,struct exchange_parameter *) +#define 
HANTRO_IOCH_LINK_RUN_CMDBUF _IOR(HANTRO_IOC_MAGIC, 30,u16 *) +#define HANTRO_IOCH_WAIT_CMDBUF _IOR(HANTRO_IOC_MAGIC, 31,u16 *) +#define HANTRO_IOCH_RELEASE_CMDBUF _IOR(HANTRO_IOC_MAGIC, 32,u16 *) +#define HANTRO_IOCH_POLLING_CMDBUF _IOR(HANTRO_IOC_MAGIC, 33,u16 *) + +#define HANTRO_IOCH_GET_VCMD_ENABLE _IOWR(HANTRO_IOC_MAGIC, 50,unsigned long) + +#define GET_ENCODER_IDX(type_info) (CORE_VC8000E); +#define CORETYPE(core) (1 << core) +#define HANTRO_IOC_MAXNR 60 + +/*priority support*/ + +#define MAX_CMDBUF_PRIORITY_TYPE 2 //0:normal priority,1:high priority + +#define CMDBUF_PRIORITY_NORMAL 0 +#define CMDBUF_PRIORITY_HIGH 1 + +#define OPCODE_WREG (0x01<<27) +#define OPCODE_END (0x02<<27) +#define OPCODE_NOP (0x03<<27) +#define OPCODE_RREG (0x16<<27) +#define OPCODE_INT (0x18<<27) +#define OPCODE_JMP (0x19<<27) +#define OPCODE_STALL (0x09<<27) +#define OPCODE_CLRINT (0x1a<<27) +#define OPCODE_JMP_RDY0 (0x19<<27) +#define OPCODE_JMP_RDY1 ((0x19<<27)|(1<<26)) +#define JMP_IE_1 (1<<25) +#define JMP_RDY_1 (1<<26) + + +#define CLRINT_OPTYPE_READ_WRITE_1_CLEAR 0 +#define CLRINT_OPTYPE_READ_WRITE_0_CLEAR 1 +#define CLRINT_OPTYPE_READ_CLEAR 2 + +#define VC8000E_FRAME_RDY_INT_MASK 0x0001 +#define VC8000E_CUTREE_RDY_INT_MASK 0x0002 +#define VC8000E_DEC400_INT_MASK 0x0004 +#define VC8000E_L2CACHE_INT_MASK 0x0008 +#define VC8000E_MMU_INT_MASK 0x0010 +#define CUTREE_MMU_INT_MASK 0x0020 + + +#define VC8000D_FRAME_RDY_INT_MASK 0x0100 +#define VC8000D_DEC400_INT_MASK 0x0400 +#define VC8000D_L2CACHE_INT_MASK 0x0800 +#define VC8000D_MMU_INT_MASK 0x1000 + + +#define VC8000D_DEC400_INT_MASK_1_1_1 0x0200 +#define VC8000D_L2CACHE_INT_MASK_1_1_1 0x0400 +#define VC8000D_MMU_INT_MASK_1_1_1 0x0800 + + + + +#define HW_ID_1_0_C 0x43421001 +#define HW_ID_1_1_2 0x43421102 + + +#define ASIC_STATUS_SEGMENT_READY 0x1000 +#define ASIC_STATUS_FUSE_ERROR 0x200 +#define ASIC_STATUS_SLICE_READY 0x100 +#define ASIC_STATUS_LINE_BUFFER_DONE 0x080 /* low latency */ +#define ASIC_STATUS_HW_TIMEOUT 0x040 
+#define ASIC_STATUS_BUFF_FULL 0x020 +#define ASIC_STATUS_HW_RESET 0x010 +#define ASIC_STATUS_ERROR 0x008 +#define ASIC_STATUS_FRAME_READY 0x004 +#define ASIC_IRQ_LINE 0x001 +#define ASIC_STATUS_ALL (ASIC_STATUS_SEGMENT_READY |\ + ASIC_STATUS_FUSE_ERROR |\ + ASIC_STATUS_SLICE_READY |\ + ASIC_STATUS_LINE_BUFFER_DONE |\ + ASIC_STATUS_HW_TIMEOUT |\ + ASIC_STATUS_BUFF_FULL |\ + ASIC_STATUS_HW_RESET |\ + ASIC_STATUS_ERROR |\ + ASIC_STATUS_FRAME_READY) + +enum +{ + CORE_VC8000E = 0, + CORE_VC8000EJ = 1, + CORE_CUTREE = 2, + CORE_DEC400 = 3, + CORE_MMU = 4, + CORE_L2CACHE = 5, + CORE_AXIFE = 6, + CORE_APBFT = 7, + CORE_MMU_1 = 8, + CORE_AXIFE_1 = 9, + CORE_MAX +}; + +//#define CORE_MAX (CORE_MMU) + +/*module_type support*/ + +enum vcmd_module_type{ + VCMD_TYPE_ENCODER = 0, + VCMD_TYPE_CUTREE, + VCMD_TYPE_DECODER, + VCMD_TYPE_JPEG_ENCODER, + VCMD_TYPE_JPEG_DECODER, + MAX_VCMD_TYPE +}; + +struct cmdbuf_mem_parameter +{ + u32 *virt_cmdbuf_addr; + ptr_t phy_cmdbuf_addr; //cmdbuf pool base physical address + u32 mmu_phy_cmdbuf_addr; //cmdbuf pool base mmu mapping address + u32 cmdbuf_total_size; //cmdbuf pool total size in bytes. + u16 cmdbuf_unit_size; //one cmdbuf size in bytes. all cmdbuf have same size. + u32 *virt_status_cmdbuf_addr; + ptr_t phy_status_cmdbuf_addr; //status cmdbuf pool base physical address + u32 mmu_phy_status_cmdbuf_addr; //status cmdbuf pool base mmu mapping address + u32 status_cmdbuf_total_size; //status cmdbuf pool total size in bytes. + u16 status_cmdbuf_unit_size; //one status cmdbuf size in bytes. all status cmdbuf have same size. + ptr_t base_ddr_addr; //for pcie interface, hw can only access phy_cmdbuf_addr-pcie_base_ddr_addr. + //for other interface, this value should be 0? +}; + +struct config_parameter +{ + u16 module_type; //input vc8000e=0,cutree=1,vc8000d=2,jpege=3, jpegd=4 + u16 vcmd_core_num; //output, how many vcmd cores are there with corresponding module_type. 
+ u16 submodule_main_addr; //output,if submodule addr == 0xffff, this submodule does not exist. + u16 submodule_dec400_addr; //output ,if submodule addr == 0xffff, this submodule does not exist. + u16 submodule_L2Cache_addr; //output,if submodule addr == 0xffff, this submodule does not exist. + u16 submodule_MMU_addr[2]; //output,if submodule addr == 0xffff, this submodule does not exist. + u16 submodule_axife_addr[2]; //output,if submodule addr == 0xffff, this submodule does not exist. + u16 config_status_cmdbuf_id; // output , this status comdbuf save the all register values read in driver init.//used for analyse configuration in cwl. + u32 vcmd_hw_version_id; +}; + +/*need to consider how many memory should be allocated for status.*/ +struct exchange_parameter +{ + u32 executing_time; //input ;executing_time=encoded_image_size*(rdoLevel+1)*(rdoq+1); + u16 module_type; //input input vc8000e=0,IM=1,vc8000d=2,jpege=3, jpegd=4 + u16 cmdbuf_size; //input, reserve is not used; link and run is input. + u16 priority; //input,normal=0, high/live=1 + u16 cmdbuf_id; //output ,it is unique in driver. + u16 core_id; //just used for polling. +}; + +typedef struct CoreWaitOut +{ + u32 job_id[4]; + u32 irq_status[4]; + u32 irq_num; +} CORE_WAIT_OUT; + +typedef struct +{ + u32 subsys_idx; + u32 core_type; + unsigned long offset; + u32 reg_size; + int irq; +}CORE_CONFIG; + +typedef struct +{ + unsigned long base_addr; + u32 iosize; + u32 resouce_shared; //indicate the core share resources with other cores or not.If 1, means cores can not work at the same time. 
+}SUBSYS_CONFIG; + +typedef struct +{ + u32 type_info; //indicate which IP is contained in this subsystem and each uses one bit of this variable + unsigned long offset[CORE_MAX]; + unsigned long regSize[CORE_MAX]; + int irq[CORE_MAX]; +}SUBSYS_CORE_INFO; + +typedef struct +{ + SUBSYS_CONFIG cfg; + SUBSYS_CORE_INFO core_info; +}SUBSYS_DATA; + +struct vcmd_profile { + int dev_loading_percent; + int dev_loading_max_percent; + + int last_hw_proc_us; + int avg_hw_proc_us; + int proced_count; + int cur_submit_vcmd_id; + int cur_complete_vcmd_id; + int vcmd_num_share_irq; + + //error statistics + u32 vcmd_abort_cnt; + u32 vcmd_buserr_cnt; + u32 vcmd_timeout_cnt; + u32 vcmd_cmderr_cnt; +}; +extern struct vcmd_profile venc_vcmd_profile; +#ifdef __cplusplus +} +#endif + +#endif /* !_VC8000_VCMD_DRIVER_H_ */ diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_normal_driver.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_normal_driver.c new file mode 100644 index 00000000000000..ba2fbc474726a3 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_normal_driver.c @@ -0,0 +1,1459 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ + +#include +#include +/* needed for __init,__exit directives */ +#include +/* needed for remap_page_range + SetPageReserved + ClearPageReserved +*/ +#include +/* obviously, for kmalloc */ +#include +/* for struct file_operations, register_chrdev() */ +#include +/* standard error codes */ +#include + +#include +/* request_irq(), free_irq() */ +#include +#include + +#include +#include +/* needed for virt_to_phys() */ +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* our own stuff */ +#include "vc8000_driver.h" + +unsigned long gBaseDDRHw = 0; +unsigned int pcie = 0; /* used in hantro_mmu.c*/ + +//#define MULTI_THR_TEST +#ifdef MULTI_THR_TEST + +#define WAIT_NODE_NUM 32 +struct wait_list_node +{ + u32 node_id; //index of the node + u32 used_flag; //1:the node is insert to the wait queue list. + u32 sem_used; //1:the source is released and the semphone is uped. + struct semaphore wait_sem; //the unique semphone for per reserve_encoder thread. + u32 wait_cond; //the condition for wait. Equal to the "core_info". + struct list_head wait_list; //list node. 
+}; +static struct list_head reserve_header; +static struct wait_list_node res_wait_node[WAIT_NODE_NUM]; + +static void wait_delay(unsigned int delay) { + if(delay > 0) { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + ktime_t dl = ktime_set((delay / MSEC_PER_SEC), + (delay % MSEC_PER_SEC) * NSEC_PER_MSEC); + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&dl, HRTIMER_MODE_REL); + #else + msleep(delay); + #endif + } +} + +static u32 request_wait_node(struct wait_list_node **node,u32 start_id) +{ + u32 i; + struct wait_list_node *temp_node; + while(1) + { + for(i=start_id;iused_flag==0) + { + temp_node->used_flag=1; + *node = temp_node; + return i; + } + } + wait_delay(10); + } +} + +static void request_wait_sema(struct wait_list_node **node) +{ + u32 i; + struct wait_list_node *temp_node; + while(1) + { + for(i=0;iused_flag==0)&&(temp_node->sem_used==0)) + { + temp_node->sem_used =1; + *node = temp_node; + return ; + } + } + wait_delay(10); + } +} + +static void init_wait_node(struct wait_list_node *node,u32 cond, u32 sem_flag) +{ + node->used_flag = 0; + node->wait_cond = cond; + sema_init(&node->wait_sem, sem_flag); + INIT_LIST_HEAD(&node->wait_list); + if(sem_flag>0) + { + node->sem_used =1; + } +} + +static void init_reserve_wait(u32 dev_num) +{ + u32 i; + u32 cond = 0x80000001; + u32 sem_flag =0; + struct wait_list_node *node; + +// printk("%s,%d, dev_num %d\n",__FUNCTION__,__LINE__,dev_num); + + INIT_LIST_HEAD(&reserve_header); + + for(i=0;inode_id = i; + init_wait_node(node,cond,sem_flag); + } +} + +void release_reserve_wait(u32 dev_num) +{ + +} + +#endif +/********variables declaration related with race condition**********/ + +struct semaphore enc_core_sem; +DECLARE_WAIT_QUEUE_HEAD(hw_queue); +DEFINE_SPINLOCK(owner_lock); +DECLARE_WAIT_QUEUE_HEAD(enc_wait_queue); + +/*------------------------------------------------------------------------ +*****************************PORTING LAYER******************************** 
+-------------------------------------------------------------------------*/ +#define RESOURCE_SHARED_INTER_SUBSYS 0 /*0:no resource sharing inter subsystems 1: existing resource sharing*/ +#define SUBSYS_0_IO_ADDR 0x90000 /*customer specify according to own platform*/ +#define SUBSYS_0_IO_SIZE (1024 * 4) /* bytes */ + +#define SUBSYS_1_IO_ADDR 0xA0000 /*customer specify according to own platform*/ +#define SUBSYS_1_IO_SIZE (20000 * 4) /* bytes */ + +#define INT_PIN_SUBSYS_0_VC8000E -1 +#define INT_PIN_SUBSYS_0_CUTREE -1 +#define INT_PIN_SUBSYS_0_DEC400 -1 +#define INT_PIN_SUBSYS_0_L2CACHE -1 +#define INT_PIN_SUBSYS_1_VC8000E -1 +#define INT_PIN_SUBSYS_1_CUTREE -1 +#define INT_PIN_SUBSYS_1_DEC400 -1 + + +/*for all subsystem, the subsys info should be listed here for subsequent use*/ +/*base_addr, iosize, resource_shared*/ +SUBSYS_CONFIG subsys_array[]= { + {SUBSYS_0_IO_ADDR, SUBSYS_0_IO_SIZE, RESOURCE_SHARED_INTER_SUBSYS}, //subsys_0 + //{SUBSYS_1_IO_ADDR, SUBSYS_1_IO_SIZE, RESOURCE_SHARED_INTER_SUBSYS}, //subsys_1 +}; + +/*here config every core in all subsystem*/ +/*NOTE: no matter what format(HEVC/H264/JPEG/AV1/...) 
is supported in VC8000E, just use [CORE_VC8000E] to indicate it's a VC8000E core*/ +/* CUTREE can work standalone, so it can be a subsytem or just one core of a subsytem.*/ +/*subsys_idx, core_type, offset, reg_size, irq*/ +CORE_CONFIG core_array[]= { + {0, CORE_VC8000E, 0x1000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_VC8000E + //{0, CORE_MMU, 0x2000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_VC8000E + //{0, CORE_AXIFE, 0x3000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_AXIFE + //{0, CORE_MMU_1, 0x4000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_VC8000E + //{0, CORE_AXIFE_1, 0x5000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_AXIFE_1 + //{0, CORE_DEC400, 0x6000, 1600 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_DEC400 + //{0, CORE_L2CACHE, 0xc000, 500 * 4, INT_PIN_SUBSYS_0_L2CACHE}, //subsys_0_l2cache + //{0, CORE_CUTREE, 0xd000, 500 * 4, INT_PIN_SUBSYS_0_L2CACHE}, //subsys_0_CUTREE + //{1, CORE_CUTREE, 0x1000, 500 * 4, INT_PIN_SUBSYS_0_CUTREE}, //CUTREE + //{1, CORE_MMU, 0x2000, 500 * 4, INT_PIN_SUBSYS_0_CUTREE}, //subsys_1_MMU + //{1, CORE_AXIFE, 0x3000, 500 * 4, INT_PIN_SUBSYS_0_CUTREE}, //subsys_1_AXIFE + +}; +/*------------------------------END-------------------------------------*/ + +/***************************TYPE AND FUNCTION DECLARATION****************/ + +/* here's all the must remember stuff */ +typedef struct +{ + SUBSYS_DATA subsys_data; //config of each core,such as base addr, iosize,etc + u32 hw_id; //VC8000E/VC8000EJ hw id to indicate project + u32 subsys_id; //subsys id for driver and sw internal use + u32 is_valid; //indicate this subsys is hantro's core or not + int pid[CORE_MAX]; //indicate which process is occupying the subsys + u32 is_reserved[CORE_MAX]; //indicate this subsys is occupied by user or not + u32 irq_received[CORE_MAX]; //indicate which core receives irq + u32 irq_status[CORE_MAX]; //IRQ status of each core + u32 job_id[CORE_MAX]; + char *buffer; + unsigned int buffsize; + volatile u8 *hwregs; + struct 
fasync_struct *async_queue; +} hantroenc_t; + +static int ReserveIO(void); +static void ReleaseIO(void); +//static void ResetAsic(hantroenc_t * dev); + +#ifdef hantroenc_DEBUG +static void dump_regs(unsigned long data); +#endif + +/* IRQ handler */ +#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +static irqreturn_t hantroenc_isr(int irq, void *dev_id, struct pt_regs *regs); +#else +static irqreturn_t hantroenc_isr(int irq, void *dev_id); +#endif + +/*********************local variable declaration*****************/ +unsigned long sram_base = 0; +unsigned int sram_size = 0; +/* and this is our MAJOR; use 0 for dynamic allocation (recommended)*/ +static int hantroenc_major = 0; +static int total_subsys_num = 0; +static int total_core_num = 0; +volatile unsigned int asic_status = 0; +/* dynamic allocation*/ +static hantroenc_t* hantroenc_data = NULL; +#ifdef IRQ_SIMULATION +struct timer_list timer0; +struct timer_list timer1; +#endif + +/******************************************************************************/ +static int CheckEncIrq(hantroenc_t *dev,u32 *core_info,u32 *irq_status, u32 *job_id) +{ + unsigned long flags; + int rdy = 0; + u8 core_type = 0; + u8 subsys_idx = 0; + + core_type = (u8)(*core_info & 0x0F); + subsys_idx = (u8)(*core_info >> 4); + + if (subsys_idx > total_subsys_num-1) + { + *core_info = -1; + *irq_status = 0; + return 1; + } + + spin_lock_irqsave(&owner_lock, flags); + + if(dev[subsys_idx].irq_received[core_type]) + { + /* reset the wait condition(s) */ + PDEBUG("check subsys[%d][%d] irq ready\n", subsys_idx, core_type); + //dev[subsys_idx].irq_received[core_type] = 0; + rdy = 1; + *core_info = subsys_idx; + *irq_status = dev[subsys_idx].irq_status[core_type]; + if(job_id != NULL) + *job_id = dev[subsys_idx].job_id[core_type]; + } + + spin_unlock_irqrestore(&owner_lock, flags); + + return rdy; +} +static unsigned int WaitEncReady(hantroenc_t *dev,u32 *core_info,u32 *irq_status) +{ + PDEBUG("WaitEncReady\n"); + + 
if(wait_event_interruptible(enc_wait_queue, CheckEncIrq(dev,core_info,irq_status, NULL))) + { + PDEBUG("ENC wait_event_interruptible interrupted\n"); + return -ERESTARTSYS; + } + + return 0; +} + +static int CheckEncIrqbyPolling(hantroenc_t *dev,u32 *core_info,u32 *irq_status,u32 *job_id) +{ + unsigned long flags; + int rdy = 0; + u8 core_type = 0; + u8 subsys_idx = 0; + u32 irq, hwId, majorId, wClr; + unsigned long reg_offset = 0; + u32 loop = 30; + u32 interval = 100; + u32 enable_status = 0; + + core_type = (u8)(*core_info & 0x0F); + subsys_idx = (u8)(*core_info >> 4); + + if (subsys_idx > total_subsys_num-1) + { + *core_info = -1; + *irq_status = 0; + return 1; + } + + do + { + spin_lock_irqsave(&owner_lock, flags); + if(dev[subsys_idx].is_reserved[core_type] == 0) + { + //printk(KERN_DEBUG"subsys[%d][%d] is not reserved\n", subsys_idx, core_type); + goto end_1; + } + else if(dev[subsys_idx].irq_received[core_type] && + (dev[subsys_idx].irq_status[core_type] & (ASIC_STATUS_FUSE_ERROR |ASIC_STATUS_HW_TIMEOUT|ASIC_STATUS_BUFF_FULL| + ASIC_STATUS_HW_RESET|ASIC_STATUS_ERROR|ASIC_STATUS_FRAME_READY )) ) + { + rdy = 1; + *core_info = subsys_idx; + *irq_status = dev[subsys_idx].irq_status[core_type]; + *job_id = dev[subsys_idx].job_id[core_type]; + goto end_1; + } + + reg_offset = dev[subsys_idx].subsys_data.core_info.offset[core_type]; + irq = (u32)ioread32((void *)(dev[subsys_idx].hwregs + reg_offset + 0x04)); + + enable_status = (u32)ioread32((void *)(dev[subsys_idx].hwregs + reg_offset + 20)); + + if(irq & ASIC_STATUS_ALL) + { + PDEBUG("check subsys[%d][%d] irq ready\n", subsys_idx, core_type); + if(irq & 0x20) + iowrite32(0, (void *)(dev[subsys_idx].hwregs + reg_offset + 0x14)); + + /* clear all IRQ bits. (hwId >= 0x80006100) means IRQ is cleared by writting 1 */ + hwId = ioread32((void *)dev[subsys_idx].hwregs + reg_offset); + majorId = (hwId & 0x0000FF00) >> 8; + wClr = (majorId >= 0x61) ? 
irq: (irq & (~0x1FD)); + iowrite32(wClr, (void *)(dev[subsys_idx].hwregs + reg_offset + 0x04)); + + rdy = 1; + *core_info = subsys_idx; + *irq_status = irq; + dev[subsys_idx].irq_received[core_type] = 1; + dev[subsys_idx].irq_status[core_type] = irq; + *job_id = dev[subsys_idx].job_id[core_type]; + goto end_1; + } + + spin_unlock_irqrestore(&owner_lock, flags); + mdelay(interval); + }while(loop--); + goto end_2; + +end_1: + spin_unlock_irqrestore(&owner_lock, flags); +end_2: + return rdy; +} + +static int CheckEncAnyIrq(hantroenc_t *dev, CORE_WAIT_OUT *out) +{ + u32 i; + int rdy = 1; + u32 core_info,irq_status, job_id; + u32 core_type = CORE_VC8000E; + + for(i = 0; i < total_subsys_num; i++) + { + if(!(dev[i].subsys_data.core_info.type_info & (1<job_id[out->irq_num] = job_id; + out->irq_status[out->irq_num] = irq_status; + //printk(KERN_DEBUG "irq_status of subsys %d job_id %d is:%x\n",i,job_id,irq_status); + out->irq_num++; + rdy = 1; + } + } + + return rdy; +} + +static unsigned int WaitEncAnyReady(hantroenc_t *dev,CORE_WAIT_OUT *out) +{ + if(wait_event_interruptible(enc_wait_queue, CheckEncAnyIrq(dev,out))) + { + PDEBUG("ENC wait_event_interruptible interrupted\n"); + return -ERESTARTSYS; + } + + return 0; +} + + +static int CheckCoreOccupation(hantroenc_t *dev, u8 core_type) +{ + int ret = 0; + unsigned long flags; + + core_type = (core_type == CORE_VC8000EJ ? 
CORE_VC8000E : core_type); + + spin_lock_irqsave(&owner_lock, flags); + if(!dev->is_reserved[core_type]) { + dev->is_reserved[core_type] = 1; +#ifndef MULTI_THR_TEST + dev->pid[core_type] = current->pid; +#endif + ret = 1; + PDEBUG("CheckCoreOccupation pid=%d\n",dev->pid[core_type]); + } + + spin_unlock_irqrestore(&owner_lock, flags); + + return ret; +} + +static int GetWorkableCore(hantroenc_t *dev,u32 *core_info,u32 *core_info_tmp) +{ + int ret = 0; + u32 i = 0; + u32 cores; + u8 core_type = 0; + u32 required_num = 0; + static u32 reserved_job_id = 0; + unsigned long flags; + /*input core_info[32 bit]: mode[1bit](1:all 0:specified)+amount[3bit](the needing amount -1)+reserved+core_type[8bit] + + output core_info[32 bit]: the reserved core info to user space and defined as below. + mode[1bit](1:all 0:specified)+amount[3bit](reserved total core num)+reserved+subsys_mapping[8bit] + */ + cores = *core_info; + required_num = ((cores >> CORE_INFO_AMOUNT_OFFSET)& 0x7)+1; + core_type = (u8)(cores&0xFF); + + if (*core_info_tmp == 0) + *core_info_tmp = required_num << CORE_INFO_AMOUNT_OFFSET; + else + required_num = (*core_info_tmp >> CORE_INFO_AMOUNT_OFFSET); + + PDEBUG("GetWorkableCore:required_num=%d,core_info=%x\n",required_num,*core_info); + + if(required_num) + { + /* a valid free Core with specified core type */ + for (i = 0; i < total_subsys_num; i++) + { + if (dev[i].subsys_data.core_info.type_info & (1 << core_type)) + { + core_type = (core_type == CORE_VC8000EJ ? 
CORE_VC8000E : core_type); + if(dev[i].is_valid && CheckCoreOccupation(&dev[i], core_type)) + { + *core_info_tmp = ((((*core_info_tmp >> CORE_INFO_AMOUNT_OFFSET)-1)<> CORE_INFO_AMOUNT_OFFSET)==0) + { + ret = 1; + spin_lock_irqsave(&owner_lock, flags); + *core_info = (reserved_job_id << 16)|(*core_info_tmp & 0xFF); + dev[i].job_id[core_type] = reserved_job_id; + reserved_job_id++; + spin_unlock_irqrestore(&owner_lock, flags); + *core_info_tmp = 0; + required_num = 0; + break; + } + } + } + } + } + else + ret = 1; + + PDEBUG("*core_info = %x\n",*core_info); + return ret; +} + +static long ReserveEncoder(hantroenc_t *dev,u32 *core_info) +{ + u32 core_info_tmp = 0; +#ifdef MULTI_THR_TEST + struct wait_list_node *wait_node; + u32 start_id=0; +#endif + + + /*If HW resources are shared inter cores, just make sure only one is using the HW*/ + if (dev[0].subsys_data.cfg.resouce_shared) + { + if (down_interruptible(&enc_core_sem)) + return -ERESTARTSYS; + } + +#ifdef MULTI_THR_TEST + while(1) + { + start_id=request_wait_node(&wait_node,start_id); + if(wait_node->sem_used==1) + { + if(GetWorkableCore(dev,core_info,&core_info_tmp)) + { + down_interruptible(&wait_node->wait_sem); + wait_node->sem_used=0; + wait_node->used_flag=0; + break; + } + else + { + start_id++; + } + } + else + { + wait_node->wait_cond = *core_info; + list_add_tail(&wait_node->wait_list,&reserve_header); + down_interruptible(&wait_node->wait_sem); + *core_info = wait_node->wait_cond; + list_del(&wait_node->wait_list); + wait_node->sem_used=0; + wait_node->used_flag=0; + break; + } + } +#else + + /* lock a core that has specified core id*/ + if(wait_event_interruptible(hw_queue, + GetWorkableCore(dev,core_info,&core_info_tmp) != 0 )) + return -ERESTARTSYS; +#endif + return 0; +} + +static void ReleaseEncoder(hantroenc_t * dev,u32 *core_info) +{ + unsigned long flags; + u8 core_type = 0, subsys_idx = 0, unCheckPid = 0; + unCheckPid = (u8)((*core_info) >> 31); +#ifdef MULTI_THR_TEST + u32 release_ok=0; + 
struct list_head *node; + struct wait_list_node *wait_node; + u32 core_info_tmp = 0; +#endif + subsys_idx = (u8)((*core_info&0xF0) >> 4); + core_type = (u8)(*core_info&0x0F); + + PDEBUG("ReleaseEncoder:subsys_idx=%d,core_type=%x\n",subsys_idx,core_type); + /* release specified subsys and core type */ + + if (dev[subsys_idx].subsys_data.core_info.type_info & (1 << core_type)) + { + core_type = (core_type == CORE_VC8000EJ ? CORE_VC8000E : core_type); + spin_lock_irqsave(&owner_lock, flags); + PDEBUG("subsys[%d].pid[%d]=%d,current->pid=%d\n",subsys_idx, core_type, dev[subsys_idx].pid[core_type],current->pid); +#ifdef MULTI_THR_TEST + if(dev[subsys_idx].is_reserved[core_type]) +#else + if(dev[subsys_idx].is_reserved[core_type] && (dev[subsys_idx].pid[core_type] == current->pid || unCheckPid == 1)) +#endif + { + dev[subsys_idx].pid[core_type] = -1; + dev[subsys_idx].is_reserved[core_type] = 0; + dev[subsys_idx].irq_received[core_type] = 0; + dev[subsys_idx].irq_status[core_type] = 0; + dev[subsys_idx].job_id[core_type] = 0; + spin_unlock_irqrestore(&owner_lock, flags); +#ifdef MULTI_THR_TEST + release_ok=0; + if(list_empty(&reserve_header)) + { + request_wait_sema(&wait_node); + up(&wait_node->wait_sem); + } + else + { + list_for_each(node,&reserve_header) + { + wait_node = container_of(node,struct wait_list_node,wait_list); + if((GetWorkableCore(dev,&wait_node->wait_cond,&core_info_tmp))&&(wait_node->sem_used==0)) + { + release_ok =1; + wait_node->sem_used = 1; + up(&wait_node->wait_sem); + break; + } + } + if(release_ok==0) + { + request_wait_sema(&wait_node); + up(&wait_node->wait_sem); + } + } +#endif + + } + else + { + if (dev[subsys_idx].pid[core_type] != current->pid && unCheckPid == 0) + printk(KERN_ERR "WARNING:pid(%d) is trying to release core reserved by pid(%d)\n",current->pid,dev[subsys_idx].pid[core_type]); + spin_unlock_irqrestore(&owner_lock, flags); + } + //wake_up_interruptible_all(&hw_queue); + } +#ifndef MULTI_THR_TEST + 
wake_up_interruptible_all(&hw_queue); +#endif + if(dev->subsys_data.cfg.resouce_shared) + up(&enc_core_sem); + + return; +} + +#ifdef IRQ_SIMULATION +static void get_random_bytes(void *buf, int nbytes); + +static void hantroenc_trigger_irq_0(unsigned long value) +{ + PDEBUG("trigger core 0 irq\n"); + del_timer(&timer0); + hantroenc_isr(0,(void *)&hantroenc_data[0]); +} +static void hantroenc_trigger_irq_1(unsigned long value) +{ + PDEBUG("trigger core 1 irq\n"); + del_timer(&timer1); + hantroenc_isr(0,(void *)&hantroenc_data[1]); +} + +#endif + +static long hantroenc_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + unsigned int tmp; +#ifdef HANTROMMU_SUPPORT + u32 i = 0; + volatile u8* mmu_hwregs[MAX_SUBSYS_NUM][2]; +#endif + + PDEBUG("ioctl cmd 0x%08ux\n", cmd); + /* + * extract the type and number bitfields, and don't encode + * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() + */ + if(_IOC_TYPE(cmd) != HANTRO_IOC_MAGIC +#ifdef HANTROMMU_SUPPORT + &&_IOC_TYPE(cmd) != HANTRO_IOC_MMU +#endif + ) + return -ENOTTY; + if((_IOC_TYPE(cmd) == HANTRO_IOC_MAGIC && + _IOC_NR(cmd) > HANTRO_IOC_MAXNR) +#ifdef HANTROMMU_SUPPORT + ||(_IOC_TYPE(cmd) == HANTRO_IOC_MMU && + _IOC_NR(cmd) > HANTRO_IOC_MMU_MAXNR) +#endif + ) + return -ENOTTY; + + /* + * the direction is a bitmask, and VERIFY_WRITE catches R/W + * transfers. 
`Type' is user-oriented, while + * access_ok is kernel-oriented, so the concept of "read" and + * "write" is reversed + */ + if(_IOC_DIR(cmd) & _IOC_READ) +#if KERNEL_VERSION(5,0,0) <= LINUX_VERSION_CODE + err = !access_ok((void *) arg, _IOC_SIZE(cmd)); +#else + err = !access_ok(VERIFY_WRITE, (void *) arg, _IOC_SIZE(cmd)); +#endif + else if(_IOC_DIR(cmd) & _IOC_WRITE) +#if KERNEL_VERSION(5,0,0) <= LINUX_VERSION_CODE + err = !access_ok((void *) arg, _IOC_SIZE(cmd)); +#else + err = !access_ok(VERIFY_READ, (void *) arg, _IOC_SIZE(cmd)); +#endif + if(err) + return -EFAULT; + + switch (cmd) + { + case HANTRO_IOCH_GET_VCMD_ENABLE: + { + __put_user(0, (unsigned long *) arg); + break; + } + + case HANTRO_IOCG_HWOFFSET: + { + u32 id; + __get_user(id, (u32*)arg); + + if(id >= total_subsys_num) + { + return -EFAULT; + } + __put_user(hantroenc_data[id].subsys_data.cfg.base_addr, (unsigned long *) arg); + break; + } + + case HANTRO_IOCG_HWIOSIZE: + { + u32 id; + u32 io_size; + __get_user(id, (u32*)arg); + + if(id >= total_subsys_num) + { + return -EFAULT; + } + io_size = hantroenc_data[id].subsys_data.cfg.iosize; + __put_user(io_size, (u32 *) arg); + + return 0; + } + case HANTRO_IOCG_SRAMOFFSET: + __put_user(sram_base, (unsigned long *) arg); + break; + case HANTRO_IOCG_SRAMEIOSIZE: + __put_user(sram_size, (unsigned int *) arg); + break; + case HANTRO_IOCG_CORE_NUM: + __put_user(total_subsys_num, (unsigned int *) arg); + break; + case HANTRO_IOCG_CORE_INFO: + { + u32 idx; + SUBSYS_CORE_INFO in_data; + copy_from_user(&in_data, (void*)arg, sizeof(SUBSYS_CORE_INFO)); + idx = in_data.type_info; + if (idx > total_subsys_num - 1) + return -1; + + copy_to_user((void*)arg, &hantroenc_data[idx].subsys_data.core_info, sizeof(SUBSYS_CORE_INFO)); + break; + } + case HANTRO_IOCH_ENC_RESERVE: + { + u32 core_info; + int ret; + PDEBUG("Reserve ENC Cores\n"); + __get_user(core_info, (u32*)arg); + ret = ReserveEncoder(hantroenc_data,&core_info); + if (ret == 0) + __put_user(core_info, (u32 *) 
arg); + return ret; + } + case HANTRO_IOCH_ENC_RELEASE: + { + u32 core_info; + __get_user(core_info, (u32*)arg); + + PDEBUG("Release ENC Core\n"); + + ReleaseEncoder(hantroenc_data,&core_info); + + break; + } + + case HANTRO_IOCG_CORE_WAIT: + { + u32 core_info; + u32 irq_status; + __get_user(core_info, (u32*)arg); + #ifdef IRQ_SIMULATION + u32 random_num; + get_random_bytes(&random_num, sizeof(u32)); + random_num = random_num%10+80; + PDEBUG("random_num=%d\n",random_num); + + /*init a timer to trigger irq*/ + if (core_info==1) + { + init_timer(&timer0); + timer0.function = &hantroenc_trigger_irq_0; + timer0.expires = jiffies + random_num*HZ/10; //the expires time is 1s + add_timer(&timer0); + } + + if (core_info==2) + { + init_timer(&timer1); + timer1.function = &hantroenc_trigger_irq_1; + timer1.expires = jiffies + random_num*HZ/10; //the expires time is 1s + add_timer(&timer1); + } + #endif + + tmp = WaitEncReady(hantroenc_data,&core_info,&irq_status); + if (tmp==0) + { + __put_user(irq_status, (unsigned int *)arg); + return core_info;//return core_id + } + else + { + return -1; + } + + break; + } + case HANTRO_IOCG_ANYCORE_WAIT: + { + CORE_WAIT_OUT out; + memset(&out, 0, sizeof(CORE_WAIT_OUT)); +#ifdef IRQ_SIMULATION + u32 random_num; + get_random_bytes(&random_num, sizeof(u32)); + random_num = random_num%10+80; + PDEBUG("random_num=%d\n",random_num); + + /*init a timer to trigger irq*/ + if (core_info==1) + { + init_timer(&timer0); + timer0.function = &hantroenc_trigger_irq_0; + timer0.expires = jiffies + random_num*HZ/10; //the expires time is 1s + add_timer(&timer0); + } + + if (core_info==2) + { + init_timer(&timer1); + timer1.function = &hantroenc_trigger_irq_1; + timer1.expires = jiffies + random_num*HZ/10; //the expires time is 1s + add_timer(&timer1); + } +#endif + + tmp = WaitEncAnyReady(hantroenc_data,&out); + if (tmp==0) + { + copy_to_user((void*)arg, &out, sizeof(CORE_WAIT_OUT)); + return 0; + } + else + { + return -1; + } + + break; + } + + default: 
+ { +#ifdef HANTROMMU_SUPPORT + if(_IOC_TYPE(cmd) == HANTRO_IOC_MMU) + { + memset (mmu_hwregs, 0, MAX_SUBSYS_NUM*2*sizeof(u8*)); + for (i = 0; i < total_subsys_num; i++ ) + { + if(hantroenc_data[i].subsys_data.core_info.type_info & (1<private_data = (void *) dev; + + PDEBUG("dev opened\n"); + return result; +} +static int hantroenc_release(struct inode *inode, struct file *filp) +{ + hantroenc_t *dev = (hantroenc_t *) filp->private_data; + u32 core_id = 0, i = 0; + +#ifdef hantroenc_DEBUG + dump_regs((unsigned long) dev); /* dump the regs */ +#endif + unsigned long flags; + + PDEBUG("dev closed\n"); + + for (i = 0;i < total_subsys_num; i++) + { + for (core_id = 0; core_id < CORE_MAX; core_id++) + { + spin_lock_irqsave(&owner_lock, flags); + if (dev[i].is_reserved[core_id] == 1 && dev[i].pid[core_id] == current->pid) + { + dev[i].pid[core_id] = -1; + dev[i].is_reserved[core_id] = 0; + dev[i].irq_received[core_id] = 0; + dev[i].irq_status[core_id] = 0; + PDEBUG("release reserved core\n"); + } + spin_unlock_irqrestore(&owner_lock, flags); + } + } + +#ifdef HANTROMMU_SUPPORT + for(i = 0; i < total_subsys_num; i++) + { + if(!(hantroenc_data[i].subsys_data.core_info.type_info & (1<subsys_data.cfg.resouce_shared) + up(&enc_core_sem); + + return 0; +} + +/* VFS methods */ +static struct file_operations hantroenc_fops = { + .owner= THIS_MODULE, + .open = hantroenc_open, + .release = hantroenc_release, + .unlocked_ioctl = hantroenc_ioctl, + .fasync = NULL, +}; + + +/*----------------------------------------- + platform register +-----------------------------------------*/ + +static const struct of_device_id isp_of_match[] = { + { .compatible = "thead,light-vc8000e", }, + { .compatible = "xuantie,th1520-vc8000e", }, + { /* sentinel */ }, +}; + + +static int encoder_hantrodec_probe(struct platform_device *pdev) +{ + int result; + int i, j; + + struct resource *mem; + mem = platform_get_resource(pdev,IORESOURCE_MEM,0); + if(mem->start) + subsys_array[0].base_addr = mem->start; 
+ core_array[0].irq = platform_get_irq(pdev,0); + + total_subsys_num = sizeof(subsys_array)/sizeof(SUBSYS_CONFIG); + + for (i = 0; i< total_subsys_num; i++) + { + printk(KERN_INFO "hantroenc: module init - subsys[%d] addr =%p\n",i, + (void *)subsys_array[i].base_addr); + } + + hantroenc_data = (hantroenc_t *)vmalloc(sizeof(hantroenc_t)*total_subsys_num); + if (hantroenc_data == NULL) + goto err1; + memset(hantroenc_data,0,sizeof(hantroenc_t)*total_subsys_num); + + for(i = 0; i < total_subsys_num; i++) + { + hantroenc_data[i].subsys_data.cfg = subsys_array[i]; + hantroenc_data[i].async_queue = NULL; + hantroenc_data[i].hwregs = NULL; + hantroenc_data[i].subsys_id = i; + for(j = 0; j < CORE_MAX; j++) + hantroenc_data[i].subsys_data.core_info.irq[j] = -1; + } + + total_core_num = sizeof(core_array)/sizeof(CORE_CONFIG); + for (i = 0; i < total_core_num; i++) + { + hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.type_info |= (1<<(core_array[i].core_type)); + hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.offset[core_array[i].core_type] = core_array[i].offset; + hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.regSize[core_array[i].core_type] = core_array[i].reg_size; + hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.irq[core_array[i].core_type] = core_array[i].irq; + } + + result = register_chrdev(hantroenc_major, "vc8000", &hantroenc_fops); + if(result < 0) + { + printk(KERN_INFO "hantroenc: unable to get major <%d>\n", + hantroenc_major); + goto err1; + } + else if(result != 0) /* this is for dynamic major */ + { + hantroenc_major = result; + } + + result = ReserveIO(); + if(result < 0) + { + goto err; + } + + //ResetAsic(hantroenc_data); /* reset hardware */ + + sema_init(&enc_core_sem, 1); + +#ifdef HANTROMMU_SUPPORT + /* MMU only initial once No matter how many MMU we have */ + for(i = 0; i < total_subsys_num; i++) + { + if(!(hantroenc_data[i].subsys_data.core_info.type_info & (1< busy, change your 
config\n", + hantroenc_data[i].subsys_data.core_info.irq[j]); + ReleaseIO(); + goto err; + } + } + else + { + printk(KERN_INFO "hantroenc: IRQ not in use!\n"); + } + } + } +#ifdef MULTI_THR_TEST + init_reserve_wait(total_subsys_num); +#endif + printk(KERN_INFO "hantroenc: module inserted. Major <%d>\n", hantroenc_major); + + return 0; + + err: + unregister_chrdev(hantroenc_major, "vc8000"); + err1: + if (hantroenc_data != NULL) + vfree(hantroenc_data); + printk(KERN_INFO "hantroenc: module not inserted\n"); + return result; +} + +static int encoder_hantrodec_remove(struct platform_device *pdev) +{ + int i=0, j = 0; +#ifdef HANTROMMU_SUPPORT + volatile u8* mmu_hwregs[MAX_SUBSYS_NUM][2]; +#endif + + for(i=0;i> 16) & 0xFFFF) != ((ENC_HW_ID1 >> 16) & 0xFFFF))) && + ((((hwid >> 16) & 0xFFFF) != ((ENC_HW_ID2 >> 16) & 0xFFFF)))) + { + printk(KERN_INFO "hantroenc: HW not found at %p\n", + (void *)hantroenc_data[i].subsys_data.cfg.base_addr); + #ifdef hantroenc_DEBUG + dump_regs((unsigned long) &hantroenc_data); + #endif + hantroenc_data[i].is_valid = 0; + ReleaseIO(); + continue; + } + hantroenc_data[i].hw_id = hwid; + hantroenc_data[i].is_valid = 1; + found_hw = 1; + + hw_cfg = (u32)ioread32((void *)hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[VC8000E_core_idx] + 320); + hantroenc_data[i].subsys_data.core_info.type_info &= 0xFFFFFFFC; + if(hw_cfg & 0x88000000) + hantroenc_data[i].subsys_data.core_info.type_info |= (1< with ID <0x%08x>\n", + (void *)hantroenc_data[i].subsys_data.cfg.base_addr, hwid); + + } + + if (found_hw == 0) + { + printk(KERN_ERR "hantroenc: NO ANY HW found!!\n"); + return -1; + } + + return 0; +} + +static void ReleaseIO(void) +{ + u32 i; + for (i=0;i<=total_subsys_num;i++) + { + if (hantroenc_data[i].is_valid == 0) + continue; + if(hantroenc_data[i].hwregs) + iounmap((void *) hantroenc_data[i].hwregs); + release_mem_region(hantroenc_data[i].subsys_data.cfg.base_addr, hantroenc_data[i].subsys_data.cfg.iosize); + } +} + 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +irqreturn_t hantroenc_isr(int irq, void *dev_id, struct pt_regs *regs) +#else +irqreturn_t hantroenc_isr(int irq, void *dev_id) +#endif +{ + unsigned int handled = 0; + hantroenc_t *dev = (hantroenc_t *) dev_id; + u32 irq_status; + unsigned long flags; + u32 core_type = 0, i = 0; + unsigned long reg_offset = 0; + u32 hwId, majorId, wClr; + + /*get core id by irq from subsys config*/ + for (i = 0; i < CORE_MAX; i++) + { + if (dev->subsys_data.core_info.irq[i] == irq) + { + core_type = i; + reg_offset = dev->subsys_data.core_info.offset[i]; + break; + } + } + + /*If core is not reserved by any user, but irq is received, just clean it*/ + spin_lock_irqsave(&owner_lock, flags); + if (!dev->is_reserved[core_type]) + { + printk(KERN_DEBUG "hantroenc_isr:received IRQ but core is not reserved!\n"); + irq_status = (u32)ioread32((void *)(dev->hwregs + reg_offset + 0x04)); + if(irq_status & 0x01) + { + /* Disable HW when buffer over-flow happen + * HW behavior changed in over-flow + * in-pass, HW cleanup HWIF_ENC_E auto + * new version: ask SW cleanup HWIF_ENC_E when buffer over-flow + */ + if(irq_status & 0x20) + iowrite32(0, (void *)(dev->hwregs + reg_offset + 0x14)); + + /* clear all IRQ bits. (hwId >= 0x80006100) means IRQ is cleared by writting 1 */ + hwId = ioread32((void *)dev->hwregs + reg_offset); + majorId = (hwId & 0x0000FF00) >> 8; + wClr = (majorId >= 0x61) ? 
irq_status: (irq_status & (~0x1FD)); + iowrite32(wClr, (void *)(dev->hwregs + reg_offset + 0x04)); + } + spin_unlock_irqrestore(&owner_lock, flags); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&owner_lock, flags); + + printk(KERN_DEBUG "hantroenc_isr:received IRQ!\n"); + irq_status = (u32)ioread32((void *)(dev->hwregs + reg_offset + 0x04)); + printk(KERN_DEBUG "irq_status of subsys %d core %d is:%x\n",dev->subsys_id,core_type,irq_status); + if(irq_status & 0x01) + { + /* Disable HW when buffer over-flow happen + * HW behavior changed in over-flow + * in-pass, HW cleanup HWIF_ENC_E auto + * new version: ask SW cleanup HWIF_ENC_E when buffer over-flow + */ + if(irq_status & 0x20) + iowrite32(0, (void *)(dev->hwregs + reg_offset + 0x14)); + + /* clear all IRQ bits. (hwId >= 0x80006100) means IRQ is cleared by writting 1 */ + hwId = ioread32((void *)dev->hwregs + reg_offset); + majorId = (hwId & 0x0000FF00) >> 8; + wClr = (majorId >= 0x61) ? irq_status: (irq_status & (~0x1FD)); + iowrite32(wClr, (void *)(dev->hwregs + reg_offset + 0x04)); + + spin_lock_irqsave(&owner_lock, flags); + dev->irq_received[core_type] = 1; + dev->irq_status[core_type] = irq_status & (~0x01); + spin_unlock_irqrestore(&owner_lock, flags); + + wake_up_interruptible_all(&enc_wait_queue); + handled++; + } + if(!handled) + { + PDEBUG("IRQ received, but not hantro's!\n"); + } + return IRQ_HANDLED; +} + +#ifdef hantroenc_DEBUG +static void ResetAsic(hantroenc_t * dev) +{ + int i,n; + for (n=0;niosize; i += 4) + { + PDEBUG("\toffset %02X = %08X\n", i, ioread32(dev->hwregs + i)); + } + PDEBUG("Reg Dump End\n"); +} +#endif + + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_vcmd_driver.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_vcmd_driver.c new file mode 100644 index 00000000000000..8237432f948749 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_vcmd_driver.c @@ -0,0 +1,5594 @@ 
+/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +#include +#include +/* needed for __init,__exit directives */ +#include +/* needed for remap_page_range + SetPageReserved + ClearPageReserved +*/ +#include +/* obviously, for kmalloc */ +#include +/* for struct file_operations, register_chrdev() */ +#include +/* standard error codes */ +#include + +#include +#include +#include +#include + + +#include +/* request_irq(), free_irq() */ +#include +#include + +#include +#include +/* needed for virt_to_phys() */ +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +//vpu devfreq +#include +#include +#include + +#include "vc8000_devfreq.h" + +/* our own stuff */ +#include +#include +#include "vcmdswhwregisters.h" +#include "bidirect_list.h" +#include "vc8000_driver.h" +#undef linux +#define CREATE_TRACE_POINTS +#include "venc_trace_point.h" + +#ifdef CONFIG_TH1520_SYSTEM_MONITOR +#include +#endif + +/*------------------------------------------------------------------------ +*****************************VCMD CONFIGURATION BY CUSTOMER******************************** +-------------------------------------------------------------------------*/ +//video encoder vcmd configuration + +#define VCMD_ENC_IO_ADDR_0 0x90000 /*customer specify according to own 
platform*/ +#define VCMD_ENC_IO_SIZE_0 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_ENC_INT_PIN_0 -1 +#define VCMD_ENC_MODULE_TYPE_0 0 +#define VCMD_ENC_MODULE_MAIN_ADDR_0 0x1000 /*customer specify according to own platform*/ +#define VCMD_ENC_MODULE_DEC400_ADDR_0 0X2000 //0X6000 /*0xffff means no such kind of submodule*/ +#define VCMD_ENC_MODULE_L2CACHE_ADDR_0 0XFFFF +#define VCMD_ENC_MODULE_MMU0_ADDR_0 0X4000 //0X2000 +#define VCMD_ENC_MODULE_MMU1_ADDR_0 0XFFFF //0X4000 +#define VCMD_ENC_MODULE_AXIFE0_ADDR_0 0XFFFF //0X3000 +#define VCMD_ENC_MODULE_AXIFE1_ADDR_0 0XFFFF //0X5000 +/* NOTE(review): in all the tables below an *_ADDR_* value of 0XFFFF marks the submodule as absent; only instance 0 of each module type is actually referenced by vcmd_core_array, instances 1-3 are compiled out with #if 0. */ + +#define VCMD_ENC_IO_ADDR_1 0x91000 /*customer specify according to own platform*/ +#define VCMD_ENC_IO_SIZE_1 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_ENC_INT_PIN_1 -1 +#define VCMD_ENC_MODULE_TYPE_1 0 +#define VCMD_ENC_MODULE_MAIN_ADDR_1 0x0000 /*customer specify according to own platform*/ +#define VCMD_ENC_MODULE_DEC400_ADDR_1 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_ENC_MODULE_L2CACHE_ADDR_1 0XFFFF +#define VCMD_ENC_MODULE_MMU_ADDR_1 0XFFFF + +#define VCMD_ENC_IO_ADDR_2 0x92000 /*customer specify according to own platform*/ +#define VCMD_ENC_IO_SIZE_2 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_ENC_INT_PIN_2 -1 +#define VCMD_ENC_MODULE_TYPE_2 0 +#define VCMD_ENC_MODULE_MAIN_ADDR_2 0x0000 /*customer specify according to own platform*/ +#define VCMD_ENC_MODULE_DEC400_ADDR_2 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_ENC_MODULE_L2CACHE_ADDR_2 0XFFFF +#define VCMD_ENC_MODULE_MMU_ADDR_2 0XFFFF + +#define VCMD_ENC_IO_ADDR_3 0x93000 /*customer specify according to own platform*/ +#define VCMD_ENC_IO_SIZE_3 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_ENC_INT_PIN_3 -1 +#define VCMD_ENC_MODULE_TYPE_3 0 +#define VCMD_ENC_MODULE_MAIN_ADDR_3 0x0000 /*customer specify according to own platform*/ +#define VCMD_ENC_MODULE_DEC400_ADDR_3 0XFFFF /*0xffff means no such kind of submodule*/ +#define 
VCMD_ENC_MODULE_L2CACHE_ADDR_3 0XFFFF +#define VCMD_ENC_MODULE_MMU_ADDR_3 0XFFFF + +//video encoder cutree/IM vcmd configuration + + +#define VCMD_IM_IO_ADDR_0 0x94000 //0xA0000 /*customer specify according to own platform*/ +#define VCMD_IM_IO_SIZE_0 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_IM_INT_PIN_0 -1 +#define VCMD_IM_MODULE_TYPE_0 1 +#define VCMD_IM_MODULE_MAIN_ADDR_0 0x1000 /*customer specify according to own platform*/ +#define VCMD_IM_MODULE_DEC400_ADDR_0 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_IM_MODULE_L2CACHE_ADDR_0 0XFFFF +#define VCMD_IM_MODULE_MMU0_ADDR_0 0XFFFF //0X2000 +#define VCMD_IM_MODULE_MMU1_ADDR_0 0XFFFF +#define VCMD_IM_MODULE_AXIFE0_ADDR_0 0XFFFF //0X3000 +#define VCMD_IM_MODULE_AXIFE1_ADDR_0 0XFFFF //0XFFFF + +#define VCMD_IM_IO_ADDR_1 0xa1000 /*customer specify according to own platform*/ +#define VCMD_IM_IO_SIZE_1 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_IM_INT_PIN_1 -1 +#define VCMD_IM_MODULE_TYPE_1 1 +#define VCMD_IM_MODULE_MAIN_ADDR_1 0x0000 /*customer specify according to own platform*/ +#define VCMD_IM_MODULE_DEC400_ADDR_1 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_IM_MODULE_L2CACHE_ADDR_1 0XFFFF +#define VCMD_IM_MODULE_MMU_ADDR_1 0XFFFF + +#define VCMD_IM_IO_ADDR_2 0xa2000 /*customer specify according to own platform*/ +#define VCMD_IM_IO_SIZE_2 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_IM_INT_PIN_2 -1 +#define VCMD_IM_MODULE_TYPE_2 1 +#define VCMD_IM_MODULE_MAIN_ADDR_2 0x0000 /*customer specify according to own platform*/ +#define VCMD_IM_MODULE_DEC400_ADDR_2 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_IM_MODULE_L2CACHE_ADDR_2 0XFFFF +#define VCMD_IM_MODULE_MMU_ADDR_2 0XFFFF + +#define VCMD_IM_IO_ADDR_3 0xa3000 /*customer specify according to own platform*/ +#define VCMD_IM_IO_SIZE_3 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_IM_INT_PIN_3 -1 +#define VCMD_IM_MODULE_TYPE_3 1 +#define VCMD_IM_MODULE_MAIN_ADDR_3 
0x0000 /*customer specify according to own platform*/ +#define VCMD_IM_MODULE_DEC400_ADDR_3 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_IM_MODULE_L2CACHE_ADDR_3 0XFFFF +#define VCMD_IM_MODULE_MMU_ADDR_3 0XFFFF + +//video decoder vcmd configuration + + +#define VCMD_DEC_IO_ADDR_0 0xb0000 /*customer specify according to own platform*/ +#define VCMD_DEC_IO_SIZE_0 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_DEC_INT_PIN_0 -1 +#define VCMD_DEC_MODULE_TYPE_0 2 +#define VCMD_DEC_MODULE_MAIN_ADDR_0 0x0000 /*customer specify according to own platform*/ +#define VCMD_DEC_MODULE_DEC400_ADDR_0 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_DEC_MODULE_L2CACHE_ADDR_0 0XFFFF +#define VCMD_DEC_MODULE_MMU_ADDR_0 0XFFFF + + +#define VCMD_DEC_IO_ADDR_1 0xb1000 /*customer specify according to own platform*/ +#define VCMD_DEC_IO_SIZE_1 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_DEC_INT_PIN_1 -1 +#define VCMD_DEC_MODULE_TYPE_1 2 +#define VCMD_DEC_MODULE_MAIN_ADDR_1 0x0000 /*customer specify according to own platform*/ +#define VCMD_DEC_MODULE_DEC400_ADDR_1 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_DEC_MODULE_L2CACHE_ADDR_1 0XFFFF +#define VCMD_DEC_MODULE_MMU_ADDR_1 0XFFFF + +#define VCMD_DEC_IO_ADDR_2 0xb2000 /*customer specify according to own platform*/ +#define VCMD_DEC_IO_SIZE_2 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_DEC_INT_PIN_2 -1 +#define VCMD_DEC_MODULE_TYPE_2 2 +#define VCMD_DEC_MODULE_MAIN_ADDR_2 0x0000 /*customer specify according to own platform*/ +#define VCMD_DEC_MODULE_DEC400_ADDR_2 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_DEC_MODULE_L2CACHE_ADDR_2 0XFFFF +#define VCMD_DEC_MODULE_MMU_ADDR_2 0XFFFF + +#define VCMD_DEC_IO_ADDR_3 0xb3000 /*customer specify according to own platform*/ +#define VCMD_DEC_IO_SIZE_3 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_DEC_INT_PIN_3 -1 +#define VCMD_DEC_MODULE_TYPE_3 2 +#define VCMD_DEC_MODULE_MAIN_ADDR_3 
0x0000 /*customer specify according to own platform*/ +#define VCMD_DEC_MODULE_DEC400_ADDR_3 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_DEC_MODULE_L2CACHE_ADDR_3 0XFFFF +#define VCMD_DEC_MODULE_MMU_ADDR_3 0XFFFF + +//JPEG encoder vcmd configuration + +#define VCMD_JPEGE_IO_ADDR_0 0x90000 /*customer specify according to own platform*/ +/* NOTE(review): 0x90000 appears to coincide with the encoder core-0 window on this platform - confirm this overlap is intentional (only one of NETINT/MAGVII style cores is built at a time). */ +#define VCMD_JPEGE_IO_SIZE_0 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGE_INT_PIN_0 -1 +#define VCMD_JPEGE_MODULE_TYPE_0 3 +#define VCMD_JPEGE_MODULE_MAIN_ADDR_0 0x1000 /*customer specify according to own platform*/ +#define VCMD_JPEGE_MODULE_DEC400_ADDR_0 0XFFFF //0X4000 /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGE_MODULE_L2CACHE_ADDR_0 0XFFFF +#define VCMD_JPEGE_MODULE_MMU0_ADDR_0 0XFFFF //0X2000 +#define VCMD_JPEGE_MODULE_MMU1_ADDR_0 0XFFFF +#define VCMD_JPEGE_MODULE_AXIFE0_ADDR_0 0XFFFF //0X3000 +#define VCMD_JPEGE_MODULE_AXIFE1_ADDR_0 0XFFFF + +#define VCMD_JPEGE_IO_ADDR_1 0xC1000 /*customer specify according to own platform*/ +#define VCMD_JPEGE_IO_SIZE_1 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGE_INT_PIN_1 -1 +#define VCMD_JPEGE_MODULE_TYPE_1 3 +#define VCMD_JPEGE_MODULE_MAIN_ADDR_1 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGE_MODULE_DEC400_ADDR_1 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGE_MODULE_L2CACHE_ADDR_1 0XFFFF +#define VCMD_JPEGE_MODULE_MMU_ADDR_1 0XFFFF + +#define VCMD_JPEGE_IO_ADDR_2 0xC2000 /*customer specify according to own platform*/ +#define VCMD_JPEGE_IO_SIZE_2 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGE_INT_PIN_2 -1 +#define VCMD_JPEGE_MODULE_TYPE_2 3 +#define VCMD_JPEGE_MODULE_MAIN_ADDR_2 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGE_MODULE_DEC400_ADDR_2 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGE_MODULE_L2CACHE_ADDR_2 0XFFFF +#define VCMD_JPEGE_MODULE_MMU_ADDR_2 0XFFFF + +#define VCMD_JPEGE_IO_ADDR_3 0xC3000 
/*customer specify according to own platform*/ +#define VCMD_JPEGE_IO_SIZE_3 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGE_INT_PIN_3 -1 +#define VCMD_JPEGE_MODULE_TYPE_3 3 +#define VCMD_JPEGE_MODULE_MAIN_ADDR_3 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGE_MODULE_DEC400_ADDR_3 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGE_MODULE_L2CACHE_ADDR_3 0XFFFF +#define VCMD_JPEGE_MODULE_MMU_ADDR_3 0XFFFF + + +//JPEG decoder vcmd configuration + +#define VCMD_JPEGD_IO_ADDR_0 0xD0000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_IO_SIZE_0 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGD_INT_PIN_0 -1 +#define VCMD_JPEGD_MODULE_TYPE_0 4 +#define VCMD_JPEGD_MODULE_MAIN_ADDR_0 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_MODULE_DEC400_ADDR_0 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGD_MODULE_L2CACHE_ADDR_0 0XFFFF +#define VCMD_JPEGD_MODULE_MMU_ADDR_0 0XFFFF + +#define VCMD_JPEGD_IO_ADDR_1 0xD1000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_IO_SIZE_1 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGD_INT_PIN_1 -1 +#define VCMD_JPEGD_MODULE_TYPE_1 4 +#define VCMD_JPEGD_MODULE_MAIN_ADDR_1 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_MODULE_DEC400_ADDR_1 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGD_MODULE_L2CACHE_ADDR_1 0XFFFF +#define VCMD_JPEGD_MODULE_MMU_ADDR_1 0XFFFF + +#define VCMD_JPEGD_IO_ADDR_2 0xD2000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_IO_SIZE_2 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGD_INT_PIN_2 -1 +#define VCMD_JPEGD_MODULE_TYPE_2 4 +#define VCMD_JPEGD_MODULE_MAIN_ADDR_2 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_MODULE_DEC400_ADDR_2 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGD_MODULE_L2CACHE_ADDR_2 0XFFFF +#define 
VCMD_JPEGD_MODULE_MMU_ADDR_2 0XFFFF + +#define VCMD_JPEGD_IO_ADDR_3 0xD3000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_IO_SIZE_3 (ASIC_VCMD_SWREG_AMOUNT * 4) /* bytes */ +#define VCMD_JPEGD_INT_PIN_3 -1 +#define VCMD_JPEGD_MODULE_TYPE_3 4 +#define VCMD_JPEGD_MODULE_MAIN_ADDR_3 0x0000 /*customer specify according to own platform*/ +#define VCMD_JPEGD_MODULE_DEC400_ADDR_3 0XFFFF /*0xffff means no such kind of submodule*/ +#define VCMD_JPEGD_MODULE_L2CACHE_ADDR_3 0XFFFF +#define VCMD_JPEGD_MODULE_MMU_ADDR_3 0XFFFF + +/* Static description of one VCMD core instance: its register window (base address, size, irq), which module type it drives, and the byte offsets of its optional submodules inside that window (0xffff = submodule not present). */ +struct vcmd_config +{ + unsigned long vcmd_base_addr; + u32 vcmd_iosize; + int vcmd_irq; + u32 sub_module_type; /*input vc8000e=0,IM=1,vc8000d=2,jpege=3, jpegd=4*/ + u16 submodule_main_addr; // in byte + u16 submodule_dec400_addr;//if submodule addr == 0xffff, this submodule does not exist.// in byte + u16 submodule_L2Cache_addr; // in byte + u16 submodule_MMU_addr[2]; // in byte + u16 submodule_axife_addr[2]; // in byte +}; + +/* Platform selector: exactly one of these should be enabled; it picks which entries of vcmd_core_array below get compiled in. */ +#define NETINT +//#define MAGVII +//#define OYB_VCEJ +//#define OYB_VCE + +/*for all vcmds, the core info should be listed here for subsequent use*/ +static struct vcmd_config vcmd_core_array[]= { +#if defined(NETINT) || defined(OYB_VCE) + //encoder configuration + {VCMD_ENC_IO_ADDR_0, + VCMD_ENC_IO_SIZE_0, + VCMD_ENC_INT_PIN_0, + VCMD_ENC_MODULE_TYPE_0, + VCMD_ENC_MODULE_MAIN_ADDR_0, + VCMD_ENC_MODULE_DEC400_ADDR_0, + VCMD_ENC_MODULE_L2CACHE_ADDR_0, + {VCMD_ENC_MODULE_MMU0_ADDR_0, + VCMD_ENC_MODULE_MMU1_ADDR_0}, + {VCMD_ENC_MODULE_AXIFE0_ADDR_0, + VCMD_ENC_MODULE_AXIFE1_ADDR_0}}, +#endif +#if 0 + {VCMD_ENC_IO_ADDR_1, + VCMD_ENC_IO_SIZE_1, + VCMD_ENC_INT_PIN_1, + VCMD_ENC_MODULE_TYPE_1, + VCMD_ENC_MODULE_MAIN_ADDR_1, + VCMD_ENC_MODULE_DEC400_ADDR_1, + VCMD_ENC_MODULE_L2CACHE_ADDR_1, + VCMD_ENC_MODULE_MMU_ADDR_1}, + + {VCMD_ENC_IO_ADDR_2, + VCMD_ENC_IO_SIZE_2, + VCMD_ENC_INT_PIN_2, + VCMD_ENC_MODULE_TYPE_2, + VCMD_ENC_MODULE_MAIN_ADDR_2, + VCMD_ENC_MODULE_DEC400_ADDR_2, + VCMD_ENC_MODULE_L2CACHE_ADDR_2, + 
VCMD_ENC_MODULE_MMU_ADDR_2}, + + {VCMD_ENC_IO_ADDR_3, + VCMD_ENC_IO_SIZE_3, + VCMD_ENC_INT_PIN_3, + VCMD_ENC_MODULE_TYPE_3, + VCMD_ENC_MODULE_MAIN_ADDR_3, + VCMD_ENC_MODULE_DEC400_ADDR_3, + VCMD_ENC_MODULE_L2CACHE_ADDR_3, + VCMD_ENC_MODULE_MMU_ADDR_3}, +#endif + //cutree/IM configuration +#if defined(NETINT) || defined(OYB_VCE) + {VCMD_IM_IO_ADDR_0, + VCMD_IM_IO_SIZE_0, + VCMD_IM_INT_PIN_0, + VCMD_IM_MODULE_TYPE_0, + VCMD_IM_MODULE_MAIN_ADDR_0, + VCMD_IM_MODULE_DEC400_ADDR_0, + VCMD_IM_MODULE_L2CACHE_ADDR_0, + {VCMD_IM_MODULE_MMU0_ADDR_0, + VCMD_IM_MODULE_MMU1_ADDR_0}, + {VCMD_IM_MODULE_AXIFE0_ADDR_0, + VCMD_IM_MODULE_AXIFE1_ADDR_0}}, +#endif +#if 0 + {VCMD_IM_IO_ADDR_1, + VCMD_IM_IO_SIZE_1, + VCMD_IM_INT_PIN_1, + VCMD_IM_MODULE_TYPE_1, + VCMD_IM_MODULE_MAIN_ADDR_1, + VCMD_IM_MODULE_DEC400_ADDR_1, + VCMD_IM_MODULE_L2CACHE_ADDR_1, + VCMD_IM_MODULE_MMU_ADDR_1}, + + {VCMD_IM_IO_ADDR_2, + VCMD_IM_IO_SIZE_2, + VCMD_IM_INT_PIN_2, + VCMD_IM_MODULE_TYPE_2, + VCMD_IM_MODULE_MAIN_ADDR_2, + VCMD_IM_MODULE_DEC400_ADDR_2, + VCMD_IM_MODULE_L2CACHE_ADDR_2, + VCMD_IM_MODULE_MMU_ADDR_2}, + + {VCMD_IM_IO_ADDR_3, + VCMD_IM_IO_SIZE_3, + VCMD_IM_INT_PIN_3, + VCMD_IM_MODULE_TYPE_3, + VCMD_IM_MODULE_MAIN_ADDR_3, + VCMD_IM_MODULE_DEC400_ADDR_3, + VCMD_IM_MODULE_L2CACHE_ADDR_3, + VCMD_IM_MODULE_MMU_ADDR_3}, + + //decoder configuration + {VCMD_DEC_IO_ADDR_0, + VCMD_DEC_IO_SIZE_0, + VCMD_DEC_INT_PIN_0, + VCMD_DEC_MODULE_TYPE_0, + VCMD_DEC_MODULE_MAIN_ADDR_0, + VCMD_DEC_MODULE_DEC400_ADDR_0, + VCMD_DEC_MODULE_L2CACHE_ADDR_0, + VCMD_DEC_MODULE_MMU_ADDR_0}, + + {VCMD_DEC_IO_ADDR_1, + VCMD_DEC_IO_SIZE_1, + VCMD_DEC_INT_PIN_1, + VCMD_DEC_MODULE_TYPE_1, + VCMD_DEC_MODULE_MAIN_ADDR_1, + VCMD_DEC_MODULE_DEC400_ADDR_1, + VCMD_DEC_MODULE_L2CACHE_ADDR_1, + VCMD_DEC_MODULE_MMU_ADDR_1}, + + {VCMD_DEC_IO_ADDR_2, + VCMD_DEC_IO_SIZE_2, + VCMD_DEC_INT_PIN_2, + VCMD_DEC_MODULE_TYPE_2, + VCMD_DEC_MODULE_MAIN_ADDR_2, + VCMD_DEC_MODULE_DEC400_ADDR_2, + VCMD_DEC_MODULE_L2CACHE_ADDR_2, + 
VCMD_DEC_MODULE_MMU_ADDR_2}, + + {VCMD_DEC_IO_ADDR_3, + VCMD_DEC_IO_SIZE_3, + VCMD_DEC_INT_PIN_3, + VCMD_DEC_MODULE_TYPE_3, + VCMD_DEC_MODULE_MAIN_ADDR_3, + VCMD_DEC_MODULE_DEC400_ADDR_3, + VCMD_DEC_MODULE_L2CACHE_ADDR_3, + VCMD_DEC_MODULE_MMU_ADDR_3}, +#endif +#if defined(MAGVII) || defined(OYB_VCEJ) + //JPEG encoder configuration + {VCMD_JPEGE_IO_ADDR_0, + VCMD_JPEGE_IO_SIZE_0, + VCMD_JPEGE_INT_PIN_0, + VCMD_JPEGE_MODULE_TYPE_0, + VCMD_JPEGE_MODULE_MAIN_ADDR_0, + VCMD_JPEGE_MODULE_DEC400_ADDR_0, + VCMD_JPEGE_MODULE_L2CACHE_ADDR_0, + {VCMD_JPEGE_MODULE_MMU0_ADDR_0, + VCMD_JPEGE_MODULE_MMU1_ADDR_0}, + {VCMD_JPEGE_MODULE_AXIFE0_ADDR_0, + VCMD_JPEGE_MODULE_AXIFE1_ADDR_0}}, +#endif +#if 0 + {VCMD_JPEGE_IO_ADDR_1, + VCMD_JPEGE_IO_SIZE_1, + VCMD_JPEGE_INT_PIN_1, + VCMD_JPEGE_MODULE_TYPE_1, + VCMD_JPEGE_MODULE_MAIN_ADDR_1, + VCMD_JPEGE_MODULE_DEC400_ADDR_1, + VCMD_JPEGE_MODULE_L2CACHE_ADDR_1, + VCMD_JPEGE_MODULE_MMU_ADDR_1}, + + {VCMD_JPEGE_IO_ADDR_2, + VCMD_JPEGE_IO_SIZE_2, + VCMD_JPEGE_INT_PIN_2, + VCMD_JPEGE_MODULE_TYPE_2, + VCMD_JPEGE_MODULE_MAIN_ADDR_2, + VCMD_JPEGE_MODULE_DEC400_ADDR_2, + VCMD_JPEGE_MODULE_L2CACHE_ADDR_2, + VCMD_JPEGE_MODULE_MMU_ADDR_2}, + + {VCMD_JPEGE_IO_ADDR_3, + VCMD_JPEGE_IO_SIZE_3, + VCMD_JPEGE_INT_PIN_3, + VCMD_JPEGE_MODULE_TYPE_3, + VCMD_JPEGE_MODULE_MAIN_ADDR_3, + VCMD_JPEGE_MODULE_DEC400_ADDR_3, + VCMD_JPEGE_MODULE_L2CACHE_ADDR_3, + VCMD_JPEGE_MODULE_MMU_ADDR_3}, + //JPEG decoder configuration + {VCMD_JPEGD_IO_ADDR_0, + VCMD_JPEGD_IO_SIZE_0, + VCMD_JPEGD_INT_PIN_0, + VCMD_JPEGD_MODULE_TYPE_0, + VCMD_JPEGD_MODULE_MAIN_ADDR_0, + VCMD_JPEGD_MODULE_DEC400_ADDR_0, + VCMD_JPEGD_MODULE_L2CACHE_ADDR_0, + VCMD_JPEGD_MODULE_MMU_ADDR_0}, + + {VCMD_JPEGD_IO_ADDR_1, + VCMD_JPEGD_IO_SIZE_1, + VCMD_JPEGD_INT_PIN_1, + VCMD_JPEGD_MODULE_TYPE_1, + VCMD_JPEGD_MODULE_MAIN_ADDR_1, + VCMD_JPEGD_MODULE_DEC400_ADDR_1, + VCMD_JPEGD_MODULE_L2CACHE_ADDR_1, + VCMD_JPEGD_MODULE_MMU_ADDR_1}, + + {VCMD_JPEGD_IO_ADDR_2, + VCMD_JPEGD_IO_SIZE_2, + VCMD_JPEGD_INT_PIN_2, + 
VCMD_JPEGD_MODULE_TYPE_2, + VCMD_JPEGD_MODULE_MAIN_ADDR_2, + VCMD_JPEGD_MODULE_DEC400_ADDR_2, + VCMD_JPEGD_MODULE_L2CACHE_ADDR_2, + VCMD_JPEGD_MODULE_MMU_ADDR_2}, + + {VCMD_JPEGD_IO_ADDR_3, + VCMD_JPEGD_IO_SIZE_3, + VCMD_JPEGD_INT_PIN_3, + VCMD_JPEGD_MODULE_TYPE_3, + VCMD_JPEGD_MODULE_MAIN_ADDR_3, + VCMD_JPEGD_MODULE_DEC400_ADDR_3, + VCMD_JPEGD_MODULE_L2CACHE_ADDR_3, + VCMD_JPEGD_MODULE_MMU_ADDR_3}, +#endif +}; +/*these size need to be modified according to hw config.*/ +#define VCMD_ENCODER_REGISTER_SIZE (479 * 4) +#define VCMD_DECODER_REGISTER_SIZE (512 * 4) +#define VCMD_IM_REGISTER_SIZE (479 * 4) +#define VCMD_JPEG_ENCODER_REGISTER_SIZE (479 * 4) +#define VCMD_JPEG_DECODER_REGISTER_SIZE (512 * 4) + +/* NOTE(review): MAX_VCMD_NUMBER expands MAX_SAME_MODULE_TYPE_CORE_NUMBER, which is only #defined a few lines further down - legal because macros expand at the point of use, but fragile if these lines are reordered. */ +#define MAX_VCMD_NUMBER (MAX_VCMD_TYPE*MAX_SAME_MODULE_TYPE_CORE_NUMBER) // + + +#define HW_WORK_STATE_PEND 3 + +#define MAX_CMDBUF_INT_NUMBER 1 +#define INT_MIN_SUM_OF_IMAGE_SIZE (4096*2160*MAX_SAME_MODULE_TYPE_CORE_NUMBER*MAX_CMDBUF_INT_NUMBER) +#define MAX_PROCESS_CORE_NUMBER 4*8 +#define PROCESS_MAX_SUM_OF_IMAGE_SIZE (4096*2160*MAX_SAME_MODULE_TYPE_CORE_NUMBER*MAX_PROCESS_CORE_NUMBER) + + +#define MAX_SAME_MODULE_TYPE_CORE_NUMBER 4 + +#define VC8000E_MAX_CONFIG_LEN 32 + +#define VC8000E_PM_TIMEOUT 100 /* ms */ + +static size_t base_ddr_addr = 0; /*pcie address need to substract this value then can be put to register*/ + +#ifdef HANTROAXIFE_SUPPORT +#define AXIFE_SIZE (64*4) +volatile u8* axife_hwregs[MAX_VCMD_NUMBER][2]; +#endif + +#ifdef HANTROMMU_SUPPORT +#define MMU_SIZE (228*4) +extern unsigned int mmu_enable; +extern unsigned long gBaseDDRHw; +static volatile u8* mmu_hwregs[MAX_VCMD_NUMBER][2]; +#else +static unsigned int mmu_enable = 0; +#endif + +/********variables declaration related with race condition**********/ + +#define CMDBUF_MAX_SIZE (512*4*4) + + +#define CMDBUF_POOL_TOTAL_SIZE (2*1024*1024) //approximately=128x(320x240)=128x2k=128x8kbyte=1Mbytes +#define TOTAL_DISCRETE_CMDBUF_NUM (CMDBUF_POOL_TOTAL_SIZE/CMDBUF_MAX_SIZE) +#define 
CMDBUF_VCMD_REGISTER_TOTAL_SIZE 9*1024*1024-CMDBUF_POOL_TOTAL_SIZE*2 +/* NOTE(review): macro above is unparenthesized - it expands badly inside a larger expression; should be ((9*1024*1024)-(CMDBUF_POOL_TOTAL_SIZE*2)). */ +#define VCMD_REGISTER_SIZE (128*4) + +#ifndef DYNAMIC_MALLOC_VCMDNODE +/* Statically pre-allocated pools of cmdbuf objects / list nodes, indexed by cmdbuf_id, used instead of per-cmdbuf vmalloc. */ +static struct cmdbuf_obj *g_cmdbuf_obj_pool; +static struct bi_list_node *g_cmdbuf_node_pool; +#endif + +int debug_pr_devfreq_info = 0; + +/* One contiguous non-cached DMA region carved into fixed-size slots; cmdbuf_id doubles as the slot index and the interrupt vector. */ +struct noncache_mem +{ + u32 *virtualAddress; + dma_addr_t busAddress; + unsigned int mmu_bus_address; /* buffer physical address in MMU*/ + u32 size; + u16 cmdbuf_id; +}; + +/* Per-open-file bookkeeping: accumulated executing time and a wait queue used for process-level scheduling/throttling. */ +struct process_manager_obj +{ + struct file *filp; + u32 total_exe_time; + u32 pm_count; + spinlock_t spinlock; + wait_queue_head_t wait_queue; +} ; + +struct cmdbuf_obj +{ + u32 module_type; //current CMDBUF type: input vc8000e=0,IM=1,vc8000d=2,jpege=3, jpegd=4 + u32 priority; //current CMDBUFpriority: normal=0, high=1 + u32 executing_time; //current CMDBUFexecuting_time=encoded_image_size*(rdoLevel+1)*(rdoq+1); + u32 cmdbuf_size; //current CMDBUF size + u32 *cmdbuf_virtualAddress; //current CMDBUF start virtual address. + size_t cmdbuf_busAddress; //current CMDBUF start physical address. + unsigned int mmu_cmdbuf_busAddress; //current CMDBUF start mmu mapping address. + u32 *status_virtualAddress; //current status CMDBUF start virtual address. + size_t status_busAddress; //current status CMDBUF start physical address. + unsigned int mmu_status_busAddress; //current status CMDBUF start mmu mapping address. + u32 status_size; //current status CMDBUF size + u32 executing_status; //current CMDBUF executing status. + struct file *filp; //file pointer in the same process. + u16 core_id; //which vcmd core is used. + u16 cmdbuf_id; //used to manage CMDBUF in driver.It is a handle to identify cmdbuf.also is an interrupt vector.position in pool,same as status position. + u8 cmdbuf_data_loaded; //0 means sw has not copied data into this CMDBUF; 1 means sw has copied data into this CMDBUF + u8 cmdbuf_data_linked; //0 :not linked, 1:linked.into a vcmd core list. 
 + volatile u8 cmdbuf_run_done; //if 0,waiting for CMDBUF finish; if 1, op code in CMDBUF has finished one by one. HANTRO_VCMD_IOCH_WAIT_CMDBUF will check this variable. + u8 cmdbuf_need_remove; // if 0, not need to remove CMDBUF; 1 CMDBUF can be removed if it is not the last CMDBUF; + u8 has_end_cmdbuf; //if 1, the last opcode is end opCode. + u8 no_normal_int_cmdbuf; //if 1, JMP will not send normal interrupt. + struct process_manager_obj* process_manager_obj; +}; + +/* Per-VCMD-core runtime state: hardware register mapping, the linked list of queued cmdbufs, clock/power handles and suspend bookkeeping. */ +struct hantrovcmd_dev +{ + struct vcmd_config vcmd_core_cfg; //config of each core,such as base addr, irq,etc + u32 core_id; //vcmd core id for driver and sw internal use + u32 sw_cmdbuf_rdy_num; + spinlock_t* spinlock; + wait_queue_head_t * wait_queue; + wait_queue_head_t * wait_abort_queue; + bi_list list_manager; + volatile u8 *hwregs;/* IO mem base */ + u32 reg_mirror[ASIC_VCMD_SWREG_AMOUNT]; + u32 duration_without_int; //number of cmdbufs without interrupt. + volatile u8 working_state; + u32 total_exe_time; + u16 status_cmdbuf_id;//used for analyse configuration in cwl. + u32 hw_version_id; /*megvii 0x43421001, later 0x43421102*/ + u32 *vcmd_reg_mem_virtualAddress;//start virtual address of vcmd registers memory of CMDBUF. + size_t vcmd_reg_mem_busAddress; //start physical address of vcmd registers memory of CMDBUF. + unsigned int mmu_vcmd_reg_mem_busAddress; //start mmu mapping address of vcmd registers memory of CMDBUF. + u32 vcmd_reg_mem_size; // size of vcmd registers memory of CMDBUF. 
 + struct platform_device *pdev; + struct clk *cclk; + struct clk *aclk; + struct clk *pclk; + char config_buf[VC8000E_MAX_CONFIG_LEN]; + int has_power_domains; + bi_list_node* last_linked_cmdbuf_node; + bi_list_node* suspend_running_cmdbuf_node; + bool suspend_entered; + struct encoder_devfreq devfreq; +} ; + +/* + * Ioctl definitions + */ +#define VCMD_HW_ID 0x4342 + + +static struct noncache_mem vcmd_buf_mem_pool; +static struct noncache_mem vcmd_status_buf_mem_pool; +static struct noncache_mem vcmd_registers_mem_pool; + +/* Slot occupancy map for the cmdbuf pool, the ring scan position, and the count of still-free slots; all guarded by vcmd_cmdbuf_alloc_lock. */ +static u16 cmdbuf_used[TOTAL_DISCRETE_CMDBUF_NUM]; +static u16 cmdbuf_used_pos; +static u16 cmdbuf_used_residual; + +static struct hantrovcmd_dev* vcmd_manager[MAX_VCMD_TYPE][MAX_VCMD_NUMBER]; +static bi_list_node* global_cmdbuf_node[TOTAL_DISCRETE_CMDBUF_NUM]; + +static bi_list global_process_manager; + +/* Round-robin cursor and core count per module type, used by select_vcmd(). */ +static u16 vcmd_position[MAX_VCMD_TYPE]; +static int vcmd_type_core_num[MAX_VCMD_TYPE]; + +#define EXECUTING_CMDBUF_ID_ADDR 26 +#define VCMD_EXE_CMDBUF_COUNT 3 + +#define WORKING_STATE_IDLE 0 +#define WORKING_STATE_WORKING 1 +#define CMDBUF_EXE_STATUS_OK 0 +#define CMDBUF_EXE_STATUS_CMDERR 1 +#define CMDBUF_EXE_STATUS_BUSERR 2 + + +struct semaphore vcmd_reserve_cmdbuf_sem[MAX_VCMD_TYPE]; //for reserve + +//#define VCMD_DEBUG_INTERNAL + +/***************************TYPE AND FUNCTION DECLARATION****************/ + +/* here's all the must remember stuff */ + + +static int vcmd_reserve_IO(void); +static void vcmd_release_IO(void); +static void vcmd_reset_asic(struct hantrovcmd_dev * dev); +static void vcmd_reset_current_asic(struct hantrovcmd_dev * dev); +static int allocate_cmdbuf(struct noncache_mem* new_cmdbuf_addr,struct noncache_mem* new_status_cmdbuf_addr); +static void vcmd_link_cmdbuf(struct hantrovcmd_dev *dev,bi_list_node* last_linked_cmdbuf_node); +static void vcmd_start(struct hantrovcmd_dev *dev,bi_list_node* first_linked_cmdbuf_node); +static void create_kernel_process_manager(void); +static void vcmd_reset(void); + +#if 
(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +static irqreturn_t hantrovcmd_isr(int irq, void *dev_id, struct pt_regs *regs); +#else +static irqreturn_t hantrovcmd_isr(int irq, void *dev_id); +#endif + +#ifdef VCMD_DEBUG_INTERNAL +static void printk_vcmd_register_debug(const void *hwregs, char* info); +#endif + +/*********************local variable declaration*****************/ +static unsigned long vcmd_sram_base = 0; +static unsigned int vcmd_sram_size = 0; +/* and this is our MAJOR; use 0 for dynamic allocation (recommended)*/ +static int hantrovcmd_major = 0; +static int hantrovcmd_minor = 0; /* dynamic allocation */ +static struct cdev hantrovcmd_cdev; +static dev_t hantrovcmd_devt; +static struct class *hantrovcmd_class; +static int total_vcmd_core_num = 0; +/* dynamic allocation*/ +static struct hantrovcmd_dev* hantrovcmd_data = NULL; +static struct dentry *root_debugfs_dir = NULL; + +static int software_triger_abort=0; /* NOTE(review): "triger" is presumably a typo for "trigger" - renaming would touch other uses elsewhere in the file. */ + +//#define IRQ_SIMULATION + +#ifdef IRQ_SIMULATION +struct timer_manager +{ + u32 core_id; //vcmd core id for driver and sw internal use + u32 timer_id; + struct timer_list *timer; +} ; + +static struct timer_list timer[10000]; +struct timer_manager timer_reserve[10000]; +#if 0 +static struct timer_list timer0; +static struct timer_list timer1; +#endif +#endif + + +//hw_queue can be used for reserve cmdbuf memory +/* free_cmdbuf_mem() wakes this queue whenever a pool slot is released; create_cmdbuf_node() sleeps on it while the pool is exhausted. */ +DECLARE_WAIT_QUEUE_HEAD(vcmd_cmdbuf_memory_wait); + +DEFINE_SPINLOCK(vcmd_cmdbuf_alloc_lock); +DEFINE_SPINLOCK(vcmd_process_manager_lock); + + +static spinlock_t owner_lock_vcmd[MAX_VCMD_NUMBER]; + +static wait_queue_head_t wait_queue_vcmd[MAX_VCMD_NUMBER]; + +static wait_queue_head_t abort_queue_vcmd[MAX_VCMD_NUMBER]; + +#if 0 +/*allocate non-cacheable DMA memory*/ +/* Dead code kept for reference: a stub platform device/driver pair whose probe allocates the cmdbuf pools with dma_alloc_coherent. */ +#define DRIVER_NAME_HANTRO_NON_CACH_MEM "non_cach_memory" + +static struct platform_device *noncachable_mem_dev = NULL; + + +static const struct platform_device_info hantro_platform_info = { + .name = DRIVER_NAME_HANTRO_NON_CACH_MEM, + .id = -1, + .dma_mask = 
DMA_BIT_MASK(32), +}; + + +static int hantro_noncachable_mem_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + vcmd_buf_mem_pool.virtualAddress = dma_alloc_coherent(dev,CMDBUF_POOL_TOTAL_SIZE,&vcmd_buf_mem_pool.busAddress, GFP_KERNEL | GFP_DMA); + vcmd_buf_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; + vcmd_status_buf_mem_pool.virtualAddress = dma_alloc_coherent(dev,CMDBUF_POOL_TOTAL_SIZE,&vcmd_status_buf_mem_pool.busAddress, GFP_KERNEL | GFP_DMA); + vcmd_status_buf_mem_pool.size = CMDBUF_POOL_TOTAL_SIZE; + return 0; +} + +static int hantro_noncachable_mem_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + dma_free_coherent(dev,vcmd_buf_mem_pool.size,vcmd_buf_mem_pool.virtualAddress,vcmd_buf_mem_pool.busAddress); + dma_free_coherent(dev,vcmd_status_buf_mem_pool.size,vcmd_status_buf_mem_pool.virtualAddress,vcmd_status_buf_mem_pool.busAddress); + + return 0; +} + + +static const struct platform_device_id hantro_noncachable_mem_platform_ids[]={ + { + .name = DRIVER_NAME_HANTRO_NON_CACH_MEM, + }, + {/* sentinel */}, +}; + + +static const struct of_device_id hantro_of_match[]={ + { + .compatible = "thead,light-vc8000e", + }, + {/* sentinel */}, +}; + +static struct platform_driver hantro_noncachable_mem_platform_driver = { + .probe = hantro_noncachable_mem_probe, + .remove = hantro_noncachable_mem_remove, + .driver ={ + .name = DRIVER_NAME_HANTRO_NON_CACH_MEM, + .owner = THIS_MODULE, + .of_match_table = hantro_of_match, + }, + .id_table = hantro_noncachable_mem_platform_ids, +}; + +static void init_vcmd_non_cachable_memory_allocate(void) +{ + + /*create device: This will create a {struct platform_device}, It has a member dev, which is a {struct device} */ + noncachable_mem_dev = platform_device_register_full(&hantro_platform_info); + + /*when this function is called, the .probe callback is invoked.*/ + platform_driver_register(&hantro_noncachable_mem_platform_driver); + + +} + +static void 
release_vcmd_non_cachable_memory(void) +{ + + /* when this function is called, .remove callback will be invoked. use it to clean up all resources allocated in .probe.*/ + platform_driver_unregister(&hantro_noncachable_mem_platform_driver); + + /*destroy the device*/ + platform_device_unregister(noncachable_mem_dev); +} +#endif +/**********************************************************************************************************\ +*cmdbuf object management +\***********************************************************************************************************/ +/* Allocate and zero one cmdbuf_obj with vmalloc; returns NULL on failure. Only used on the DYNAMIC_MALLOC_VCMDNODE path (see create_vcmd_cmdbuf_obj). */ +static struct cmdbuf_obj* create_cmdbuf_obj(void) +{ + struct cmdbuf_obj* cmdbuf_obj=NULL; + cmdbuf_obj=vmalloc(sizeof(struct cmdbuf_obj)); + if(cmdbuf_obj==NULL) + { + PDEBUG ("%s\n","vmalloc for cmdbuf_obj fail!"); + return cmdbuf_obj; + } + memset(cmdbuf_obj,0,sizeof(struct cmdbuf_obj)); + return cmdbuf_obj; +} + +/* Release a cmdbuf_obj; a no-op when objects come from the static g_cmdbuf_obj_pool (non-DYNAMIC_MALLOC_VCMDNODE builds). */ +static void free_cmdbuf_obj(struct cmdbuf_obj* cmdbuf_obj) +{ +#ifdef DYNAMIC_MALLOC_VCMDNODE + if(cmdbuf_obj==NULL) + { + PDEBUG ("%s\n","remove_cmdbuf_obj NULL"); + return; + } + //free current cmdbuf_obj + vfree(cmdbuf_obj); + return; +#endif +} + +/* Get a zeroed cmdbuf_obj for this cmdbuf_id: vmalloc'ed when DYNAMIC_MALLOC_VCMDNODE, otherwise the fixed pool entry indexed by cmdbuf_id. */ +static struct cmdbuf_obj *create_vcmd_cmdbuf_obj(u16 cmdbuf_id) +{ + struct cmdbuf_obj *cmdbuf_obj = NULL; + +#ifdef DYNAMIC_MALLOC_VCMDNODE + cmdbuf_obj = create_cmdbuf_obj(); +#else + cmdbuf_obj = g_cmdbuf_obj_pool + cmdbuf_id; + if (cmdbuf_obj) + memset(cmdbuf_obj, 0, sizeof(struct cmdbuf_obj)); +#endif + return cmdbuf_obj; +} + +/* Same pooling scheme as above, but for the bi_list_node that wraps the cmdbuf_obj. */ +static bi_list_node *bi_list_create_vcmd_node(u16 cmdbuf_id) +{ + bi_list_node *node = NULL; + +#ifdef DYNAMIC_MALLOC_VCMDNODE + node = bi_list_create_node(); +#else + node = g_cmdbuf_node_pool + cmdbuf_id; + if (node) + memset(node, 0, sizeof(bi_list_node)); +#endif + return node; +} + +/* Return one pool slot to the free set and wake every waiter blocked in create_cmdbuf_node(). */ +static void free_cmdbuf_mem(u16 cmdbuf_id ) +{ + unsigned long flags; + + spin_lock_irqsave(&vcmd_cmdbuf_alloc_lock, flags); + cmdbuf_used[cmdbuf_id]=0; + cmdbuf_used_residual +=1; + PDEBUG(" ## real free 
cmdbuf[%d]\n",cmdbuf_id); + spin_unlock_irqrestore(&vcmd_cmdbuf_alloc_lock, flags); + wake_up_all(&vcmd_cmdbuf_memory_wait); +} + +/* NOTE(review): "flip" is presumably a typo for "filp". Frees every pool slot owned by this file whose cmdbuf was never linked to a core and never ran (cleanup on file close/error paths). The comparison of new_cmdbuf_node against the bare integer 0x55555555 (the temporary sentinel stored by allocate_cmdbuf()) needs a pointer cast such as (bi_list_node*)0x55555555 to be well-formed C. */ +static void free_cmdbuf_not_linked_by_flip(struct file *filp) +{ + unsigned long flags; + int i; + struct cmdbuf_obj* cmdbuf_obj; + bi_list_node* new_cmdbuf_node; + bool freed_flag = false; + spin_lock_irqsave(&vcmd_cmdbuf_alloc_lock, flags); + + for(i = 0; i < TOTAL_DISCRETE_CMDBUF_NUM; i++) + { + if(cmdbuf_used[i] && global_cmdbuf_node[i] != NULL) + { + new_cmdbuf_node = global_cmdbuf_node[i]; + if(new_cmdbuf_node == 0x55555555) + { + continue; + } + cmdbuf_obj = (struct cmdbuf_obj* )new_cmdbuf_node->data; + if(cmdbuf_obj->filp == filp && !cmdbuf_obj->cmdbuf_data_linked && !cmdbuf_obj->cmdbuf_run_done) + { + cmdbuf_used[i]=0; + cmdbuf_used_residual +=1; + global_cmdbuf_node[i] = NULL; + freed_flag = true; + PDEBUG(" ## Find left node not freed,real free cmdbuf[%d],remain %d\n",i,cmdbuf_used_residual); + } + } + + if(cmdbuf_used_residual >= (TOTAL_DISCRETE_CMDBUF_NUM-2)) + break; + + } + spin_unlock_irqrestore(&vcmd_cmdbuf_alloc_lock, flags); + if(freed_flag) + wake_up_all(&vcmd_cmdbuf_memory_wait); +} + +/* Reserve a cmdbuf/status slot pair from the pools (blocking up to 500 ms for a free slot), wrap it in a cmdbuf_obj plus a detached list node. Returns the node, or NULL on timeout / allocation failure (slot is returned to the pool on the failure paths). */ +static bi_list_node* create_cmdbuf_node(void) +{ + bi_list_node* current_node=NULL; + struct cmdbuf_obj* cmdbuf_obj=NULL; + struct noncache_mem new_cmdbuf_addr; + struct noncache_mem new_status_cmdbuf_addr; + + if(!wait_event_interruptible_timeout(vcmd_cmdbuf_memory_wait, allocate_cmdbuf(&new_cmdbuf_addr,&new_status_cmdbuf_addr), + msecs_to_jiffies(500)) ) + { + pr_err("vc8000e: wait allocate_cmdbuf timeout\n"); + return NULL; + } + + cmdbuf_obj = create_vcmd_cmdbuf_obj(new_cmdbuf_addr.cmdbuf_id); + if(cmdbuf_obj==NULL) + { + PDEBUG ("%s\n","create_vcmd_cmdbuf_obj fail!"); + free_cmdbuf_mem(new_cmdbuf_addr.cmdbuf_id); + return NULL; + } + cmdbuf_obj->cmdbuf_busAddress = new_cmdbuf_addr.busAddress; + cmdbuf_obj->mmu_cmdbuf_busAddress = new_cmdbuf_addr.mmu_bus_address; + cmdbuf_obj->cmdbuf_virtualAddress = 
new_cmdbuf_addr.virtualAddress; + cmdbuf_obj->cmdbuf_size = new_cmdbuf_addr.size; + cmdbuf_obj->cmdbuf_id = new_cmdbuf_addr.cmdbuf_id; + cmdbuf_obj->status_busAddress = new_status_cmdbuf_addr.busAddress; + cmdbuf_obj->mmu_status_busAddress = new_status_cmdbuf_addr.mmu_bus_address; + cmdbuf_obj->status_virtualAddress = new_status_cmdbuf_addr.virtualAddress; + cmdbuf_obj->status_size = new_status_cmdbuf_addr.size; + current_node=bi_list_create_vcmd_node(cmdbuf_obj->cmdbuf_id); + if(current_node==NULL) + { + PDEBUG ("%s\n","bi_list_create_vcmd_node fail!"); + free_cmdbuf_mem(new_cmdbuf_addr.cmdbuf_id); + free_cmdbuf_obj(cmdbuf_obj); + return NULL; + } + current_node->data = (void*)cmdbuf_obj; + current_node->next = NULL; + current_node->previous = NULL; + return current_node; +} +/* Undo create_cmdbuf_node(): return the pool slot, release the cmdbuf_obj, and (dynamic builds only) free the node itself. */ +static void free_cmdbuf_node(bi_list_node* cmdbuf_node) +{ + struct cmdbuf_obj* cmdbuf_obj=NULL; + if(cmdbuf_node==NULL) + { + PDEBUG ("%s\n","remove_cmdbuf_node NULL"); + return; + } + cmdbuf_obj = (struct cmdbuf_obj*)cmdbuf_node->data; + //free cmdbuf mem in pool + free_cmdbuf_mem(cmdbuf_obj->cmdbuf_id); + + //free struct cmdbuf_obj + free_cmdbuf_obj(cmdbuf_obj); +#ifdef DYNAMIC_MALLOC_VCMDNODE + //free current cmdbuf_node entity. + bi_list_free_node(cmdbuf_node); +#endif + return; +} + +//just remove, not free the node. +/* Detach the node from the core's list; the current list tail is deliberately never removed (returns NULL for it). */ +static bi_list_node* remove_cmdbuf_node_from_list(bi_list* list,bi_list_node* cmdbuf_node) +{ + if(cmdbuf_node==NULL) + { + PDEBUG ("%s\n","remove_cmdbuf_node_from_list NULL"); + return NULL; + } + if(cmdbuf_node->next) + { + bi_list_remove_node(list,cmdbuf_node); + return cmdbuf_node; + } + else + { + //the last one, should not be removed. 
 + return NULL; + } +} +//calculate executing_time of each vcmd +/* Sum the estimated executing_time of this node and every node after it in the core's list. */ +static u32 calculate_executing_time_after_node(bi_list_node* exe_cmdbuf_node) +{ + u32 time_run_all=0; + struct cmdbuf_obj* cmdbuf_obj_temp=NULL; + while(1) + { + if(exe_cmdbuf_node==NULL) + break; + cmdbuf_obj_temp=(struct cmdbuf_obj* )exe_cmdbuf_node->data; + time_run_all += cmdbuf_obj_temp->executing_time; + exe_cmdbuf_node = exe_cmdbuf_node->next; + } + return time_run_all; +} +/* Like the above, but counts the first node unconditionally and then only the run of high-priority successors (stops at the first CMDBUF_PRIORITY_NORMAL node). */ +static u32 calculate_executing_time_after_node_high_priority(bi_list_node* exe_cmdbuf_node) +{ + u32 time_run_all=0; + struct cmdbuf_obj* cmdbuf_obj_temp=NULL; + if(exe_cmdbuf_node==NULL) + return time_run_all; + cmdbuf_obj_temp=(struct cmdbuf_obj* )exe_cmdbuf_node->data; + time_run_all += cmdbuf_obj_temp->executing_time; + exe_cmdbuf_node = exe_cmdbuf_node->next; + while(1) + { + if(exe_cmdbuf_node==NULL) + break; + cmdbuf_obj_temp=(struct cmdbuf_obj* )exe_cmdbuf_node->data; + if(cmdbuf_obj_temp->priority==CMDBUF_PRIORITY_NORMAL) + break; + time_run_all += cmdbuf_obj_temp->executing_time; + exe_cmdbuf_node = exe_cmdbuf_node->next; + } + return time_run_all; +} +#ifdef DEBUG_CMDBUF_ALLOC +int cmdbuf_node_unexpected[TOTAL_DISCRETE_CMDBUF_NUM]; +#endif +/**********************************************************************************************************\ +*cmdbuf pool management +\***********************************************************************************************************/ +/* Scan the slot ring for a free cmdbuf; on success fills both descriptors, marks the slot used and stores a 0x55555555 sentinel in global_cmdbuf_node[] so a concurrent scanner cannot treat the slot as free before the real node is installed. Returns 1 on success, 0 when the pool is exhausted. NOTE(review): the sentinel store (and the matching compare in free_cmdbuf_not_linked_by_flip) assigns a bare int to a pointer and needs a cast; also cmdbuf_used_pos is read into cmdbuf_used_pos_start before the spinlock is taken - confirm that benign-race is intended. */ +static int allocate_cmdbuf(struct noncache_mem* new_cmdbuf_addr,struct noncache_mem* new_status_cmdbuf_addr) +{ + unsigned long flags; + u16 cmdbuf_used_pos_start = cmdbuf_used_pos; + spin_lock_irqsave(&vcmd_cmdbuf_alloc_lock, flags); + if(cmdbuf_used_residual==0) + { + spin_unlock_irqrestore(&vcmd_cmdbuf_alloc_lock, flags); + pr_err("%s:vc8000e:no empty cmdbuf\n",__func__); + //no empty cmdbuf + return 0; + } + //there is one cmdbuf at least + while(1) + { + if(cmdbuf_used[cmdbuf_used_pos]==0&&(global_cmdbuf_node[cmdbuf_used_pos]==NULL 
)) + { + cmdbuf_used[cmdbuf_used_pos]=1; + cmdbuf_used_residual -=1; + new_cmdbuf_addr->virtualAddress=vcmd_buf_mem_pool.virtualAddress + cmdbuf_used_pos*CMDBUF_MAX_SIZE/4; + new_cmdbuf_addr->busAddress=vcmd_buf_mem_pool.busAddress + cmdbuf_used_pos*CMDBUF_MAX_SIZE; + new_cmdbuf_addr->mmu_bus_address=vcmd_buf_mem_pool.mmu_bus_address + cmdbuf_used_pos*CMDBUF_MAX_SIZE; + new_cmdbuf_addr->size=CMDBUF_MAX_SIZE; + new_cmdbuf_addr->cmdbuf_id = cmdbuf_used_pos; + new_status_cmdbuf_addr->virtualAddress=vcmd_status_buf_mem_pool.virtualAddress + cmdbuf_used_pos*CMDBUF_MAX_SIZE/4; + new_status_cmdbuf_addr->busAddress=vcmd_status_buf_mem_pool.busAddress + cmdbuf_used_pos*CMDBUF_MAX_SIZE; + new_status_cmdbuf_addr->mmu_bus_address=vcmd_status_buf_mem_pool.mmu_bus_address + cmdbuf_used_pos*CMDBUF_MAX_SIZE; + new_status_cmdbuf_addr->size=CMDBUF_MAX_SIZE; + new_status_cmdbuf_addr->cmdbuf_id = cmdbuf_used_pos; + global_cmdbuf_node[cmdbuf_used_pos]=0x55555555; //temp set it,for another thread not hit cmdbuf_used[x] set but global_cmdbuf_node[x] is null + cmdbuf_used_pos++; + if(cmdbuf_used_pos>=TOTAL_DISCRETE_CMDBUF_NUM) + cmdbuf_used_pos=0; + spin_unlock_irqrestore(&vcmd_cmdbuf_alloc_lock, flags); + return 1; + } + else + { + #ifdef DEBUG_CMDBUF_ALLOC + if(cmdbuf_used[cmdbuf_used_pos]==0 && (global_cmdbuf_node[cmdbuf_used_pos]!=NULL ) && (cmdbuf_node_unexpected[cmdbuf_used_pos] == 0x55)) + { + pr_warn("vc8000e:unexpected cmdbuf_used[%d] is 0,but global_cmdbuf_node[%d] != NULL\n", + cmdbuf_used_pos,cmdbuf_used_pos); + cmdbuf_node_unexpected[cmdbuf_used_pos] = 0x55; + } + else if(cmdbuf_used[cmdbuf_used_pos]!=0 && (global_cmdbuf_node[cmdbuf_used_pos] == NULL ) && (cmdbuf_node_unexpected[cmdbuf_used_pos] == 0x55)) + { + pr_warn("vc8000e:unexpected cmdbuf_used[%d] != 0,but global_cmdbuf_node[%d] is NULL\n", + cmdbuf_used_pos,cmdbuf_used_pos); + cmdbuf_node_unexpected[cmdbuf_used_pos] = 0x55; + } + #endif + cmdbuf_used_pos++; + if(cmdbuf_used_pos>=TOTAL_DISCRETE_CMDBUF_NUM) + 
cmdbuf_used_pos=0; + + if(cmdbuf_used_pos_start == cmdbuf_used_pos) //searched all,not find one,should return; + break; + } + } + spin_unlock_irqrestore(&vcmd_cmdbuf_alloc_lock, flags); + pr_err("%s:vc8000e: no cmdbuf found,cmdbuf_used_residual %d\n",__func__,cmdbuf_used_residual); + return 0; +} + +/* Linear search of one core's list for the node whose cmdbuf covers bus address cmdbuf_addr; addresses are compared relative to base_ddr_addr. Returns NULL when no node matches. */ +static bi_list_node* get_cmdbuf_node_in_list_by_addr(size_t cmdbuf_addr,bi_list* list) +{ + bi_list_node* new_cmdbuf_node=NULL; + struct cmdbuf_obj* cmdbuf_obj=NULL; + new_cmdbuf_node=list->head; + while(1) + { + if(new_cmdbuf_node==NULL) + return NULL; + cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data; + if(((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr) <=cmdbuf_addr)&&(((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr+cmdbuf_obj->cmdbuf_size) >cmdbuf_addr)) ) + { + return new_cmdbuf_node; + } + new_cmdbuf_node=new_cmdbuf_node->next; + } + return NULL; +} + +/* Condition helper for the abort wait queue: true once the core has gone idle. */ +static int wait_abort_rdy(struct hantrovcmd_dev*dev) +{ + return dev->working_state == WORKING_STATE_IDLE; +} +/* Pick a core of the requested module type for this cmdbuf: first pass prefers a core with an empty list, second pass one whose tail cmdbuf already ran; round-robins via vcmd_position[]. (Definition continues beyond this fragment.) */ +static int select_vcmd(bi_list_node* new_cmdbuf_node) +{ + struct cmdbuf_obj* cmdbuf_obj=NULL; + bi_list_node* curr_cmdbuf_node=NULL; + bi_list* list=NULL; + struct hantrovcmd_dev*dev=NULL; + struct hantrovcmd_dev*smallest_dev=NULL; + u32 executing_time=0xffff; + int counter=0; + unsigned long flags=0; + u32 hw_rdy_cmdbuf_num=0; + size_t exe_cmdbuf_addr=0; + struct cmdbuf_obj* cmdbuf_obj_temp=NULL; + u32 cmdbuf_id=0; + cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data; + //there is an empty vcmd to be used + while(1) + { + dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]]; + list=&dev->list_manager; + spin_lock_irqsave(dev->spinlock, flags); + if( list->tail==NULL) + { + bi_list_insert_node_tail(list,new_cmdbuf_node); + spin_unlock_irqrestore(dev->spinlock, flags); + vcmd_position[cmdbuf_obj->module_type]++; + if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type]) + vcmd_position[cmdbuf_obj->module_type]=0; + cmdbuf_obj->core_id = 
      dev->core_id;
      return 0;
    }
    else
    {
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
    }
    if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
      break;
  }
  //there is a vcmd which tail node -> cmdbuf_run_done == 1. It means this vcmd has nothing to do, so we select it.
  counter =0;
  while(1)
  {
    dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]];
    list=&dev->list_manager;
    spin_lock_irqsave(dev->spinlock, flags);
    curr_cmdbuf_node = list->tail;
    if(curr_cmdbuf_node == NULL)
    {
      bi_list_insert_node_tail(list,new_cmdbuf_node);
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      cmdbuf_obj->core_id = dev->core_id;
      return 0;
    }
    cmdbuf_obj_temp =(struct cmdbuf_obj*) curr_cmdbuf_node->data;
    if(cmdbuf_obj_temp->cmdbuf_run_done ==1)
    {
      bi_list_insert_node_tail(list,new_cmdbuf_node);
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      cmdbuf_obj->core_id = dev->core_id;
      return 0;
    }
    else
    {
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
    }
    if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
      break;
  }

  //another case, tail = executing node, and vcmd=pend state (finish but not generate interrupt)
  counter =0;
  while(1)
  {
    dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]];
    list=&dev->list_manager;
    //read executing cmdbuf address
    if(dev->hw_version_id <= HW_ID_1_0_C )
      hw_rdy_cmdbuf_num = vcmd_get_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXE_CMDBUF_COUNT);
    else
    {
      /* Newer HW mirrors its counters into DDR; the count there lags by one
       * while a cmdbuf is in flight, hence the +1 adjustment. */
      hw_rdy_cmdbuf_num = *(dev->vcmd_reg_mem_virtualAddress+VCMD_EXE_CMDBUF_COUNT);
      if(hw_rdy_cmdbuf_num!=dev->sw_cmdbuf_rdy_num)
        hw_rdy_cmdbuf_num += 1;
    }
    spin_lock_irqsave(dev->spinlock, flags);
    curr_cmdbuf_node = list->tail;
    if(curr_cmdbuf_node == NULL)
    {
      bi_list_insert_node_tail(list,new_cmdbuf_node);
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      cmdbuf_obj->core_id = dev->core_id;
      return 0;
    }

    if((dev->sw_cmdbuf_rdy_num ==hw_rdy_cmdbuf_num))
    {
      bi_list_insert_node_tail(list,new_cmdbuf_node);
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      cmdbuf_obj->core_id = dev->core_id;
      return 0;
    }
    else
    {
      spin_unlock_irqrestore(dev->spinlock, flags);
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
    }
    if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
      break;
  }



  //there is no idle vcmd,if low priority,calculate exe time, select the least one.
  // or if high priority, calculate the exe time, select the least one and abort it.
  if(cmdbuf_obj->priority==CMDBUF_PRIORITY_NORMAL)
  {

    counter =0;
    //calculate total execute time of all devices
    while(1)
    {
      dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]];
      //read executing cmdbuf address
      if(dev->hw_version_id <= HW_ID_1_0_C )
      {
        exe_cmdbuf_addr = VCMDGetAddrRegisterValue((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR);
        list=&dev->list_manager;
        spin_lock_irqsave(dev->spinlock, flags);
        //get the executing cmdbuf node.
        curr_cmdbuf_node=get_cmdbuf_node_in_list_by_addr(exe_cmdbuf_addr,list);

        //calculate total execute time of this device
        dev->total_exe_time=calculate_executing_time_after_node(curr_cmdbuf_node);
        spin_unlock_irqrestore(dev->spinlock, flags);
      }
      else
      {
        //cmdbuf_id = vcmd_get_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_CMDBUF_EXECUTING_ID);
        /* Newer HW: the executing cmdbuf id is read back from the DDR mirror
         * of the vcmd registers rather than from a register. */
        cmdbuf_id = *(dev->vcmd_reg_mem_virtualAddress+EXECUTING_CMDBUF_ID_ADDR+1);
        spin_lock_irqsave(dev->spinlock, flags);
        if(cmdbuf_id>=TOTAL_DISCRETE_CMDBUF_NUM||cmdbuf_id == 0)
        {
          pr_err("cmdbuf_id greater than the ceiling !!\n");
          spin_unlock_irqrestore(dev->spinlock, flags);
          return -1;
        }
        //get the executing cmdbuf node.
        curr_cmdbuf_node=global_cmdbuf_node[cmdbuf_id];
        if(curr_cmdbuf_node==NULL)
        {
          /* Mirror was stale: fall back to the first linked-but-unfinished
           * node in this core's list. */
          list=&dev->list_manager;
          curr_cmdbuf_node = list->head;
          while(1)
          {
            if(curr_cmdbuf_node == NULL)
              break;
            cmdbuf_obj_temp =(struct cmdbuf_obj*) curr_cmdbuf_node->data;
            if(cmdbuf_obj_temp->cmdbuf_data_linked&&cmdbuf_obj_temp->cmdbuf_run_done==0)
              break;
            curr_cmdbuf_node = curr_cmdbuf_node->next;
          }

        }

        //calculate total execute time of this device
        dev->total_exe_time=calculate_executing_time_after_node(curr_cmdbuf_node);
        spin_unlock_irqrestore(dev->spinlock, flags);

      }
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
      if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
        break;
    }
    //find the device with the least total_exe_time.
    counter =0;
    executing_time=0xffffffff;
    while(1)
    {
      dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]];
      if(dev->total_exe_time <= executing_time)
      {
        executing_time = dev->total_exe_time;
        smallest_dev = dev;
      }
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
      if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
        break;
    }
    //insert list
    list = &smallest_dev->list_manager;
    spin_lock_irqsave(smallest_dev->spinlock, flags);
    bi_list_insert_node_tail(list,new_cmdbuf_node);
    spin_unlock_irqrestore(smallest_dev->spinlock, flags);
    cmdbuf_obj->core_id = smallest_dev->core_id;
    return 0;
  }
  else
  {
    //CMDBUF_PRIORITY_HIGH
    counter =0;
    //calculate total execute time of all devices
    while(1)
    {
      dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]];
      if(dev->hw_version_id <= HW_ID_1_0_C )
      {
        //read executing cmdbuf address
        exe_cmdbuf_addr = VCMDGetAddrRegisterValue((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR);
        list=&dev->list_manager;
        spin_lock_irqsave(dev->spinlock, flags);
        //get the executing cmdbuf node.
        curr_cmdbuf_node=get_cmdbuf_node_in_list_by_addr(exe_cmdbuf_addr,list);

        //calculate total execute time of this device
        dev->total_exe_time=calculate_executing_time_after_node_high_priority(curr_cmdbuf_node);
        spin_unlock_irqrestore(dev->spinlock, flags);
      }
      else
      {
        //cmdbuf_id = vcmd_get_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_CMDBUF_EXECUTING_ID);
        /* NOTE(review): the normal-priority path above reads the id from
         * EXECUTING_CMDBUF_ID_ADDR+1 (the offset link_and_run_cmdbuf()
         * programs into the JMP), but this path reads EXECUTING_CMDBUF_ID_ADDR
         * without +1 — confirm which offset is correct. */
        cmdbuf_id = *(dev->vcmd_reg_mem_virtualAddress+EXECUTING_CMDBUF_ID_ADDR);
        spin_lock_irqsave(dev->spinlock, flags);
        if(cmdbuf_id>=TOTAL_DISCRETE_CMDBUF_NUM||cmdbuf_id == 0)
        {
          pr_err("cmdbuf_id greater than the ceiling !!\n");
          spin_unlock_irqrestore(dev->spinlock, flags);
          return -1;
        }
        //get the executing cmdbuf node.
        curr_cmdbuf_node=global_cmdbuf_node[cmdbuf_id];
        if(curr_cmdbuf_node==NULL)
        {
          list=&dev->list_manager;
          curr_cmdbuf_node = list->head;
          while(1)
          {
            if(curr_cmdbuf_node == NULL)
              break;
            cmdbuf_obj_temp =(struct cmdbuf_obj*) curr_cmdbuf_node->data;
            if(cmdbuf_obj_temp->cmdbuf_data_linked&&cmdbuf_obj_temp->cmdbuf_run_done==0)
              break;
            curr_cmdbuf_node = curr_cmdbuf_node->next;
          }
        }

        //calculate total execute time of this device
        /* NOTE(review): the old-HW branch uses the _high_priority variant
         * here but this branch uses calculate_executing_time_after_node() —
         * looks inconsistent; confirm which estimate is intended. */
        dev->total_exe_time=calculate_executing_time_after_node(curr_cmdbuf_node);
        spin_unlock_irqrestore(dev->spinlock, flags);
      }
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
      if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
        break;
    }
    //find the smallest device.
    counter =0;
    executing_time=0xffffffff;
    while(1)
    {
      dev = vcmd_manager[cmdbuf_obj->module_type][vcmd_position[cmdbuf_obj->module_type]];
      if(dev->total_exe_time <= executing_time)
      {
        executing_time = dev->total_exe_time;
        smallest_dev = dev;
      }
      vcmd_position[cmdbuf_obj->module_type]++;
      if(vcmd_position[cmdbuf_obj->module_type]>=vcmd_type_core_num[cmdbuf_obj->module_type])
        vcmd_position[cmdbuf_obj->module_type]=0;
      counter++;
      if(counter>=vcmd_type_core_num[cmdbuf_obj->module_type])
        break;
    }
    //abort the vcmd and wait
    vcmd_write_register_value((const void *)smallest_dev->hwregs,smallest_dev->reg_mirror,HWIF_VCMD_START_TRIGGER,0);
    software_triger_abort = 1;
    if(wait_event_interruptible(*smallest_dev->wait_abort_queue, wait_abort_rdy(smallest_dev)) )
    {
      software_triger_abort = 0;
      return -ERESTARTSYS;
    }
    software_triger_abort = 0;
    //need to select inserting position again because hw maybe have run to the next node.
    //CMDBUF_PRIORITY_HIGH
    spin_lock_irqsave(smallest_dev->spinlock, flags);
    curr_cmdbuf_node = smallest_dev->list_manager.head;
    while(1)
    {
      //if list is empty or tail,insert to tail
      if(curr_cmdbuf_node == NULL)
        break;
      cmdbuf_obj_temp= (struct cmdbuf_obj*)curr_cmdbuf_node->data;
      //if find the first node which priority is normal, insert node prior to the node
      if((cmdbuf_obj_temp->priority==CMDBUF_PRIORITY_NORMAL) && (cmdbuf_obj_temp->cmdbuf_run_done==0))
        break;
      curr_cmdbuf_node = curr_cmdbuf_node->next;
    }
    /* NOTE(review): `list` here still points at whatever device the scan loop
     * touched last, not necessarily &smallest_dev->list_manager — verify this
     * insert targets the right list. */
    bi_list_insert_node_before(list,curr_cmdbuf_node,new_cmdbuf_node);
    cmdbuf_obj->core_id = smallest_dev->core_id;
    spin_unlock_irqrestore(smallest_dev->spinlock, flags);

    return 0;
  }
  return 0;
}
/* wait_event predicate: true while this process's accumulated executing time
 * is within its allowed budget. */
static int wait_process_resource_rdy(struct process_manager_obj* process_manager_obj )
{
  return process_manager_obj->total_exe_time<=PROCESS_MAX_SUM_OF_IMAGE_SIZE;
}

/* Reserve a cmdbuf for the calling process: charge its executing-time budget,
 * wait (bounded) for budget headroom, allocate a pool slot via
 * create_cmdbuf_node() and publish it in global_cmdbuf_node[].  Fills
 * input_para->cmdbuf_id/cmdbuf_size on success.  Returns 0 or -1. */
static long reserve_cmdbuf(struct file *filp,struct exchange_parameter* input_para)
{
  bi_list_node*
  new_cmdbuf_node=NULL;
  struct cmdbuf_obj* cmdbuf_obj=NULL;
  bi_list_node* process_manager_node=NULL;
  struct process_manager_obj* process_manager_obj=NULL;
  unsigned long flags;
  input_para->cmdbuf_id = 0;
  if(input_para->cmdbuf_size>CMDBUF_MAX_SIZE)
  {
    return -1;
  }
  /* Find this file handle's process-manager object. */
  spin_lock_irqsave(&vcmd_process_manager_lock, flags);
  process_manager_node = global_process_manager.head;
  while(1)
  {
    if(process_manager_node == NULL)
    {
      //should not happen
      pr_err("hantrovcmd: ERROR process_manager_node !!\n");
      spin_unlock_irqrestore(&vcmd_process_manager_lock, flags);
      return -1;
    }
    process_manager_obj = (struct process_manager_obj*)process_manager_node->data;
    if (filp==process_manager_obj->filp)
    {
      break;
    }
    process_manager_node = process_manager_node->next;
  }
  spin_unlock_irqrestore(&vcmd_process_manager_lock, flags);
  /* Charge this request against the per-process budget up front; released
   * again in release_cmdbuf(). */
  spin_lock_irqsave(&process_manager_obj->spinlock, flags);
  process_manager_obj->total_exe_time += input_para->executing_time;
  spin_unlock_irqrestore(&process_manager_obj->spinlock, flags);
  if(!wait_event_interruptible_timeout(process_manager_obj->wait_queue, wait_process_resource_rdy(process_manager_obj),
      msecs_to_jiffies(500) ))
  {
    /* NOTE(review): on timeout (and on the allocation failures below) the
     * executing_time charged above is never subtracted back, leaking budget
     * from this process — confirm a rollback is needed. */
    pr_err("vc8000e: wait_process_resource_rdy timeout! total_exe_time %lld\n",process_manager_obj->total_exe_time);
    return -1;
  }


  new_cmdbuf_node=create_cmdbuf_node();
  if(new_cmdbuf_node==NULL)
    return -1;

  cmdbuf_obj = (struct cmdbuf_obj* )new_cmdbuf_node->data;
  cmdbuf_obj->module_type = input_para->module_type;
  cmdbuf_obj->priority = input_para->priority;
  cmdbuf_obj->executing_time = input_para->executing_time;
  cmdbuf_obj->cmdbuf_size = CMDBUF_MAX_SIZE;
  input_para->cmdbuf_size =CMDBUF_MAX_SIZE;
  cmdbuf_obj->filp = filp;
  cmdbuf_obj->process_manager_obj =process_manager_obj;

  input_para->cmdbuf_id=cmdbuf_obj->cmdbuf_id;
  global_cmdbuf_node[input_para->cmdbuf_id] = new_cmdbuf_node;

  return 0;
}

/* Mark cmdbuf_id as removable and sweep backwards from it, freeing every
 * node already flagged cmdbuf_need_remove, refunding each node's
 * executing_time to its process budget and waking budget waiters.
 * Serialized by vcmd_reserve_cmdbuf_sem[module_type].  Returns 0 or -1 on a
 * bad id / ownership mismatch. */
static long release_cmdbuf(struct file *filp,u16 cmdbuf_id)
{
  struct cmdbuf_obj* cmdbuf_obj=NULL;
  bi_list_node* last_cmdbuf_node=NULL;
  bi_list_node* new_cmdbuf_node=NULL;
  bi_list* list=NULL;
  u32 module_type;


  unsigned long flags;
  struct hantrovcmd_dev* dev=NULL;
  /*get cmdbuf object according to cmdbuf_id*/
  new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id];
  if(new_cmdbuf_node==NULL)
  {
    //should not happen
    pr_err("hantrovcmd: ERROR cmdbuf_id !!\n");
    return -1;
  }
  cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data;
  if(cmdbuf_obj->filp!=filp)
  {
    //should not happen
    pr_err("hantrovcmd: ERROR cmdbuf_id !!\n");
    return -1;
  }
  module_type = cmdbuf_obj->module_type;
  //TODO
  if (down_interruptible(&vcmd_reserve_cmdbuf_sem[module_type]))
    return -ERESTARTSYS;
  dev = &hantrovcmd_data[cmdbuf_obj->core_id];

  /* NOTE(review): the device spinlock is deliberately left commented out, so
   * the list walk below relies solely on vcmd_reserve_cmdbuf_sem for
   * exclusion against the ISR — confirm that is sufficient. */
  //spin_lock_irqsave(dev->spinlock, flags);
  list=&dev->list_manager;
  cmdbuf_obj->cmdbuf_need_remove=1;
  last_cmdbuf_node = new_cmdbuf_node->previous;
  while(1)
  {
    //remove current node
    cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data;
    if(cmdbuf_obj->cmdbuf_need_remove==1)
    {
      new_cmdbuf_node=remove_cmdbuf_node_from_list(list,new_cmdbuf_node);
      if(new_cmdbuf_node)
      {
        //free node
        global_cmdbuf_node[cmdbuf_obj->cmdbuf_id] = NULL;
        if(cmdbuf_obj->process_manager_obj)
        {
          spin_lock_irqsave(&cmdbuf_obj->process_manager_obj->spinlock, flags);
          cmdbuf_obj->process_manager_obj->total_exe_time -= cmdbuf_obj->executing_time;
          spin_unlock_irqrestore(&cmdbuf_obj->process_manager_obj->spinlock, flags);
          wake_up_all(&cmdbuf_obj->process_manager_obj->wait_queue);
        }
        free_cmdbuf_node(new_cmdbuf_node);

      }
    }
    if(last_cmdbuf_node==NULL)
      break;
    new_cmdbuf_node=last_cmdbuf_node;
    last_cmdbuf_node=new_cmdbuf_node->previous;
  }
  //spin_unlock_irqrestore(dev->spinlock, flags);
  up(&vcmd_reserve_cmdbuf_sem[module_type]);
  return 0;
}
/* Remove one node from its list, clear its global_cmdbuf_node[] slot and free
 * it.  Returns 0 on success, -1 for a NULL node, 1 if removal failed. */
static long release_cmdbuf_node(bi_list* list,bi_list_node*cmdbuf_node)
{
  bi_list_node* new_cmdbuf_node=NULL;
  struct cmdbuf_obj* cmdbuf_obj=NULL;
  /*get cmdbuf object according to cmdbuf_id*/
  new_cmdbuf_node=cmdbuf_node;
  if(new_cmdbuf_node==NULL)
    return -1;
  //remove node from list
  new_cmdbuf_node=remove_cmdbuf_node_from_list(list,new_cmdbuf_node);
  if(new_cmdbuf_node)
  {
    //free node
    cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data;
    global_cmdbuf_node[cmdbuf_obj->cmdbuf_id] = NULL;
    free_cmdbuf_node(new_cmdbuf_node);
    return 0;
  }
  return 1;
}

/* Drain an entire list, freeing every node (teardown path). */
static long release_cmdbuf_node_cleanup(bi_list* list)
{
  bi_list_node* new_cmdbuf_node=NULL;
  struct cmdbuf_obj* cmdbuf_obj=NULL;
  while(1)
  {
    new_cmdbuf_node=list->head;
    if(new_cmdbuf_node==NULL)
      return 0;
    //remove node from list
    bi_list_remove_node(list,new_cmdbuf_node);
    //free node
    cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data;
    global_cmdbuf_node[cmdbuf_obj->cmdbuf_id] = NULL;
    free_cmdbuf_node(new_cmdbuf_node);
  }
  return 0;
}



/* Walk backwards from current_node to find the nearest already-linked cmdbuf;
 * the nodes after it are the ones that still need linking.  Returns the
 * linked node, or the oldest unlinked node if none before it is linked. */
static bi_list_node* find_last_linked_cmdbuf(bi_list_node* current_node)
{
  bi_list_node* new_cmdbuf_node=current_node;
  bi_list_node* last_cmdbuf_node;
  struct cmdbuf_obj* cmdbuf_obj=NULL;
  if(current_node==NULL)
    return NULL;
  last_cmdbuf_node = new_cmdbuf_node;
  new_cmdbuf_node =
  new_cmdbuf_node->previous;
  while(1)
  {
    if(new_cmdbuf_node==NULL)
      return last_cmdbuf_node;
    cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data;
    if(cmdbuf_obj->cmdbuf_data_linked)
    {
      return new_cmdbuf_node;
    }
    last_cmdbuf_node = new_cmdbuf_node;
    new_cmdbuf_node = new_cmdbuf_node->previous;
  }
  return NULL;
}
/* Validate the user-filled cmdbuf (must end with a JMP opcode), pick a core
 * via select_vcmd(), patch the cmdbuf's JMP payload with the per-core DDR
 * address where HW dumps its registers, link it into the core's chain and
 * start the core (or bump the ready count if it is already running).
 * Returns 0 on success, -1 on a bad id/opcode, -ERESTARTSYS if interrupted. */
static long link_and_run_cmdbuf(struct file *filp,struct exchange_parameter* input_para)
{
  struct cmdbuf_obj* cmdbuf_obj=NULL;
  bi_list_node* new_cmdbuf_node=NULL;
  bi_list_node* last_cmdbuf_node;
  u32* jmp_addr=NULL;
  u32 opCode;
  u32 tempOpcode;
  u32 record_last_cmdbuf_rdy_num;
  struct hantrovcmd_dev* dev=NULL;
  unsigned long flags;
  int return_value;
  u16 cmdbuf_id=input_para->cmdbuf_id;


  new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id];
  if(new_cmdbuf_node==NULL)
  {
    //should not happen
    pr_err("hantrovcmd: ERROR cmdbuf_id !!\n");
    return -1;
  }
  cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data;
  if(cmdbuf_obj->filp!=filp)
  {
    //should not happen
    pr_err("hantrovcmd: ERROR cmdbuf_id !!\n");
    return -1;
  }
  cmdbuf_obj->cmdbuf_data_loaded=1;
  cmdbuf_obj->cmdbuf_size=input_para->cmdbuf_size;
#ifdef VCMD_DEBUG_INTERNAL
  {
    u32 i;
    pr_info("vcmd link, current cmdbuf content\n");
    for(i=0;i<cmdbuf_obj->cmdbuf_size/4;i++)
    {
      pr_info("current cmdbuf data %d =0x%x\n",i,*(cmdbuf_obj->cmdbuf_virtualAddress+i));
    }
  }
#endif
  //test nop and end opcode, then assign value.
  cmdbuf_obj->has_end_cmdbuf=0; //0: has jmp opcode,1 has end code
  cmdbuf_obj->no_normal_int_cmdbuf=0; //0: interrupt when JMP,1 not interrupt when JMP
  /* The trailing JMP instruction occupies the last 4 u32 words of the
   * cmdbuf; isolate its 5-bit opcode field (bits 31..27). */
  jmp_addr = cmdbuf_obj->cmdbuf_virtualAddress + (cmdbuf_obj->cmdbuf_size/4);
  opCode=tempOpcode = *(jmp_addr-4);
  opCode >>=27;
  opCode <<=27;
  //we can't identify END opcode or JMP opcode, so we don't support END opcode in control sw and driver.
  if(opCode == OPCODE_JMP)
  {
    //jmp
    /* Bit 25 of the JMP word selects interrupt-enable on jump. */
    opCode=tempOpcode;
    opCode &=0x02000000;
    if(opCode == JMP_IE_1)
    {
      cmdbuf_obj->no_normal_int_cmdbuf=0;
    }
    else
    {
      cmdbuf_obj->no_normal_int_cmdbuf=1;
    }
  }
  else
  {
    //not support other opcode
    return -1;
  }

  if (down_interruptible(&vcmd_reserve_cmdbuf_sem[cmdbuf_obj->module_type]))
    return -ERESTARTSYS;

  venc_vcmd_profile.cur_submit_vcmd_id = input_para->cmdbuf_id;
  encoder_devfreq_record_busy( encoder_get_devfreq_priv_data() );

  return_value=select_vcmd(new_cmdbuf_node);
  if(return_value)
    /* NOTE(review): returning here leaves vcmd_reserve_cmdbuf_sem held —
     * looks like a semaphore leak on the error path; confirm an up() is
     * needed before returning. */
    return return_value;

  dev = &hantrovcmd_data[cmdbuf_obj->core_id];
  input_para->core_id = cmdbuf_obj->core_id;
  PDEBUG("filp=%px, VCMD Link CMDBUF [%d] to core [%d]\n", (void *)filp, cmdbuf_id, input_para->core_id);
  //set ddr address for vcmd registers copy.
  if(dev->hw_version_id > HW_ID_1_0_C )
  {
    //read vcmd executing register into ddr memory.
    //now core id is got and output ddr address of vcmd register can be filled in.
    //each core has its own fixed output ddr address of vcmd registers.
    jmp_addr = cmdbuf_obj->cmdbuf_virtualAddress;
    if (mmu_enable) {
      *(jmp_addr + 2) = 0;
      *(jmp_addr+1) = (u32)((dev->mmu_vcmd_reg_mem_busAddress + (EXECUTING_CMDBUF_ID_ADDR+1)*4));
    } else {
      /* 64-bit kernels split the bus address across two u32 payload words. */
      if(sizeof(size_t) == 8) {
        *(jmp_addr + 2) = (u32)((u64)(dev->vcmd_reg_mem_busAddress + (EXECUTING_CMDBUF_ID_ADDR+1)*4)>>32);
      } else {
        *(jmp_addr + 2) = 0;
      }
      *(jmp_addr+1) = (u32)((dev->vcmd_reg_mem_busAddress + (EXECUTING_CMDBUF_ID_ADDR+1)*4));
    }

    jmp_addr = cmdbuf_obj->cmdbuf_virtualAddress + (cmdbuf_obj->cmdbuf_size/4);
    //read vcmd all registers into ddr memory.
    //now core id is got and output ddr address of vcmd registers can be filled in.
    //each core has its own fixed output ddr address of vcmd registers.
    if (mmu_enable) {
      if(sizeof(size_t) == 8) {
        *(jmp_addr-6) = 0;
      }
      *(jmp_addr-7) = (u32)(dev->mmu_vcmd_reg_mem_busAddress);
    } else {
      if(sizeof(size_t) == 8) {
        *(jmp_addr-6) = (u32)((u64)dev->vcmd_reg_mem_busAddress>>32);
      } else {
        *(jmp_addr-6) = 0;
      }
      *(jmp_addr-7) = (u32)(dev->vcmd_reg_mem_busAddress);
    }
  }
  //start to link and/or run
  spin_lock_irqsave(dev->spinlock, flags);
  last_cmdbuf_node = find_last_linked_cmdbuf(new_cmdbuf_node);
  record_last_cmdbuf_rdy_num=dev->sw_cmdbuf_rdy_num;
  vcmd_link_cmdbuf(dev,last_cmdbuf_node);
  dev->last_linked_cmdbuf_node = new_cmdbuf_node; //record for resume when pm work
  if(dev->working_state==WORKING_STATE_IDLE)
  {
    //run
    /* Skip nodes that already completed so HW restarts at real pending work. */
    while (last_cmdbuf_node &&
        ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_run_done)
      last_cmdbuf_node = last_cmdbuf_node->next;

    if (last_cmdbuf_node && last_cmdbuf_node->data) {
      PDEBUG("vcmd start for cmdbuf id %d, cmdbuf_run_done = %d\n",
          ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_id,
          ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_run_done);
    }
    vcmd_start(dev,last_cmdbuf_node);
  }
  else
  {
    //just update cmdbuf ready number
    if(record_last_cmdbuf_rdy_num!=dev->sw_cmdbuf_rdy_num)
      vcmd_write_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_RDY_CMDBUF_COUNT,dev->sw_cmdbuf_rdy_num);
  }
  spin_unlock_irqrestore(dev->spinlock, flags);

  up(&vcmd_reserve_cmdbuf_sem[cmdbuf_obj->module_type]);


  return 0;
}

/******************************************************************************/
/* wait_event predicate: returns 1 (and the cmdbuf's executing status via
 * *irq_status_ret) once the ISR has marked the cmdbuf done. */
static int check_cmdbuf_irq(struct hantrovcmd_dev* dev,struct cmdbuf_obj* cmdbuf_obj,u32 *irq_status_ret)
{

  int rdy = 0;
  unsigned long flags;
  spin_lock_irqsave(dev->spinlock, flags);
  if(cmdbuf_obj->cmdbuf_run_done)
  {
    rdy = 1;
    *irq_status_ret=cmdbuf_obj->executing_status;//need to decide how to assign this variable
  }
  spin_unlock_irqrestore(dev->spinlock, flags);
  return rdy;
}
#ifdef IRQ_SIMULATION
static void get_random_bytes(void *buf, int nbytes);
#if 0
void hantrovcmd_trigger_irq_0(struct timer_list* timer)
{
  PDEBUG("trigger core 0 irq\n");
  del_timer(timer);
  hantrovcmd_isr(0,(void *)&hantrovcmd_data[0]);
}
void hantrovcmd_trigger_irq_1(struct timer_list* timer)
{
  PDEBUG("trigger core 1 irq\n");
  del_timer(timer);
  hantrovcmd_isr(0,(void *)&hantrovcmd_data[1]);
}
#else
/* Timer callback for simulated interrupts: look up which reserved timer
 * fired, invoke the ISR for its core, then release the timer slot. */
static void hantrovcmd_trigger_irq(struct timer_list *timer)
{

  u32 timer_id=0;
  u32 core_id=0;
  u32 i;
  for(i=0;i<10000;i++)
  {
    if(timer_reserve[i].timer==timer)
    {
      timer_id=timer_reserve[i].timer_id;
      core_id = timer_reserve[i].core_id;
      break;
    }
  }
  PDEBUG("trigger core 0 irq\n");
  hantrovcmd_isr(core_id,(void *)&hantrovcmd_data[core_id]);
  del_timer(timer);
  timer_reserve[timer_id].timer=NULL;
}

#endif
#endif

/* Block (up to 500 ms) until the ISR marks cmdbuf_id done, returning its
 * executing status via *irq_status_ret.  On timeout the core is aborted via
 * HWIF_VCMD_START_TRIGGER=0 and -ETIME is returned.
 * NOTE(review): declared unsigned int but returns -1/-ETIME; callers only
 * test ==0, yet a signed return type would be clearer — confirm. */
static unsigned int wait_cmdbuf_ready(struct file *filp,u16 cmdbuf_id,u32 *irq_status_ret)
{

  struct cmdbuf_obj* cmdbuf_obj=NULL;
  bi_list_node* new_cmdbuf_node=NULL;
  struct hantrovcmd_dev* dev=NULL;

  PDEBUG("wait_cmdbuf_ready\n");
  new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id];
  if(new_cmdbuf_node==NULL)
  {
    //should not happen
    pr_err("hantrovcmd: ERROR cmdbuf_id !!\n");
    return -1;
  }
  cmdbuf_obj=(struct cmdbuf_obj*)new_cmdbuf_node->data;
  if(cmdbuf_obj->filp!=filp)
  {
    //should not happen
    pr_err("hantrovcmd: ERROR cmdbuf_id !!\n");
    return -1;
  }
  dev = &hantrovcmd_data[cmdbuf_obj->core_id];
#ifdef IRQ_SIMULATION
  {
    u32 random_num;
    //get_random_bytes(&random_num, sizeof(u32));
    /* Fake latency scaled by the cmdbuf's estimated executing time. */
    random_num = (u32)((u64)100*cmdbuf_obj->executing_time/(4096*2160)+50);
    PDEBUG("random_num=%d\n",random_num);
#if 0
    /*init a timer to trigger irq*/
    if (cmdbuf_obj->core_id==0)
    {
      //init_timer(&timer0);
      //timer0.function = hantrovcmd_trigger_irq_0;
      timer_setup(&timer0,hantrovcmd_trigger_irq_0,0);
      timer0.expires = jiffies + random_num*HZ/10; //the expires time is 1s
      add_timer(&timer0);
    }

    if (cmdbuf_obj->core_id==1)
    {
      //init_timer(&timer1);
      //timer1.function = hantrovcmd_trigger_irq_1;
      timer_setup(&timer1,hantrovcmd_trigger_irq_1,0);
      timer1.expires = jiffies + random_num*HZ/10; //the expires time is 1s
      add_timer(&timer1);
    }
#else
    {
      u32 i;
      struct timer_list *temp_timer=NULL;
      /* Grab the first free slot in the simulated-timer pool. */
      for(i=0;i<10000;i++)
      {
        if(timer_reserve[i].timer==NULL)
        {
          timer_reserve[i].timer_id=i;
          timer_reserve[i].core_id=cmdbuf_obj->core_id;
          temp_timer=timer_reserve[i].timer =&timer[i] ;
          break;
        }
      }
      //if (cmdbuf_obj->core_id==0)
      {
        //init_timer(&timer0);
        //timer0.function = hantrovcmd_trigger_irq_0;
        timer_setup(temp_timer,hantrovcmd_trigger_irq,0);
        temp_timer->expires = jiffies + random_num*HZ/10; //the expires time is 1s
        add_timer(temp_timer);
      }
    }
#endif
  }
#endif
  /*In suspend, it got a signal from "Freezing user space processes" period,
   * wait_event_interruptible() will be interrupted,return to user space a IO error.
   * So, here changed to wait_event_timeout().
   */
  if(!wait_event_timeout(*dev->wait_queue,
      check_cmdbuf_irq(dev,cmdbuf_obj,irq_status_ret), msecs_to_jiffies(500) )
    )
  {
    pr_err("vc8000e:vcmd_wait_queue_0 timeout cmdbuf[%d]\n",cmdbuf_id);
    //abort the vcmd
    vcmd_write_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_START_TRIGGER,0);
    return -ETIME;
  }
  return 0;
}

/* devfreq hook: true only when every vcmd core reports work-state idle (0)
 * or pend (3), i.e. it is safe to change the clock. */
bool hantrovcmd_devfreq_check_state(void)
{
  struct hantrovcmd_dev *dev = hantrovcmd_data;
  u32 core_id = 0;
  u32 state = 0;
  for (core_id = 0;core_id < total_vcmd_core_num; core_id++) {
    state = vcmd_get_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_WORK_STATE);
    if((state != 0) && (state != 3) ) //HW state pend or idle
      return false;
  }
  return true;
}

/* Main ioctl entry for the vcmd character device: validates the command
 * number and user-buffer access, then dispatches to the cmdbuf
 * reserve/link/wait/release/polling operations (and MMU ioctls when
 * HANTROMMU_SUPPORT is enabled). */
static long hantrovcmd_ioctl(struct file *filp,
    unsigned int cmd, unsigned long arg)
{
  int err = 0;
  struct device *dev = &hantrovcmd_data->pdev->dev;
  struct process_manager_obj* process_manager_obj=NULL;

  PDEBUG("ioctl cmd 0x%08x\n", cmd);
  /*
   * extract the type and number bitfields, and don't encode
   * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
   */
  if(_IOC_TYPE(cmd) != HANTRO_IOC_MAGIC
#ifdef HANTROMMU_SUPPORT
    && _IOC_TYPE(cmd) != HANTRO_IOC_MMU
#endif
    )
    return -ENOTTY;
  if((_IOC_TYPE(cmd) == HANTRO_IOC_MAGIC &&
    _IOC_NR(cmd) > HANTRO_IOC_MAXNR)
#ifdef HANTROMMU_SUPPORT
    ||(_IOC_TYPE(cmd) == HANTRO_IOC_MMU &&
    _IOC_NR(cmd) > HANTRO_IOC_MMU_MAXNR)
#endif
    )
    return -ENOTTY;

  /*
   * the direction is a bitmask, and VERIFY_WRITE catches R/W
   * transfers.
`Type' is user-oriented, while
   * access_ok is kernel-oriented, so the concept of "read" and
   * "write" is reversed
   */
  if(_IOC_DIR(cmd) & _IOC_READ)
#if KERNEL_VERSION(5,0,0) <= LINUX_VERSION_CODE
    err = !access_ok((void *) arg, _IOC_SIZE(cmd));
#else
    err = !access_ok(VERIFY_WRITE, (void *) arg, _IOC_SIZE(cmd));
#endif
  else if(_IOC_DIR(cmd) & _IOC_WRITE)
#if KERNEL_VERSION(5,0,0) <= LINUX_VERSION_CODE
    err = !access_ok((void *) arg, _IOC_SIZE(cmd));
#else
    err = !access_ok(VERIFY_READ, (void *) arg, _IOC_SIZE(cmd));
#endif
  if(err)
    return -EFAULT;

  process_manager_obj = (struct process_manager_obj*)filp->private_data;

  /* NOTE(review): the copy_from_user()/copy_to_user() calls throughout this
   * switch ignore their return values; kernel convention is to fail with
   * -EFAULT on a partial copy — confirm before changing the user ABI. */
  switch (cmd)
  {
  case HANTRO_IOCH_GET_VCMD_ENABLE:
  {
    /* Driver runs in vcmd mode: always report 1 to userspace. */
    __put_user(1, (unsigned long *) arg);
    break;
  }
  case HANTRO_IOCH_GET_CMDBUF_PARAMETER:
  {
    /* Report the pool geometry so userspace can mmap and slice it. */
    struct cmdbuf_mem_parameter local_cmdbuf_mem_data;
    PDEBUG(" VCMD GET_CMDBUF_PARAMETER\n");
    local_cmdbuf_mem_data.cmdbuf_unit_size = CMDBUF_MAX_SIZE;
    local_cmdbuf_mem_data.status_cmdbuf_unit_size = CMDBUF_MAX_SIZE;
    local_cmdbuf_mem_data.cmdbuf_total_size = CMDBUF_POOL_TOTAL_SIZE;
    local_cmdbuf_mem_data.status_cmdbuf_total_size = CMDBUF_POOL_TOTAL_SIZE;
    local_cmdbuf_mem_data.phy_status_cmdbuf_addr = vcmd_status_buf_mem_pool.busAddress;
    local_cmdbuf_mem_data.phy_cmdbuf_addr = vcmd_buf_mem_pool.busAddress;
    if (mmu_enable) {
      local_cmdbuf_mem_data.mmu_phy_status_cmdbuf_addr = vcmd_status_buf_mem_pool.mmu_bus_address;
      local_cmdbuf_mem_data.mmu_phy_cmdbuf_addr = vcmd_buf_mem_pool.mmu_bus_address;
    } else {
      local_cmdbuf_mem_data.mmu_phy_status_cmdbuf_addr = 0;
      local_cmdbuf_mem_data.mmu_phy_cmdbuf_addr = 0;
    }
    local_cmdbuf_mem_data.base_ddr_addr = base_ddr_addr;
    copy_to_user((struct cmdbuf_mem_parameter*)arg,&local_cmdbuf_mem_data,sizeof(struct cmdbuf_mem_parameter));
    break;
  }
  case HANTRO_IOCH_GET_VCMD_PARAMETER:
  {
    /* Report submodule register offsets of the first core of the requested
     * module type (0xffff marks a submodule as absent). */
    struct config_parameter input_para;
    PDEBUG(" VCMD get vcmd config parameter \n");
    copy_from_user(&input_para,(struct config_parameter*)arg,sizeof(struct config_parameter));
    if(vcmd_type_core_num[input_para.module_type])
    {
      input_para.submodule_main_addr = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_main_addr;
      input_para.submodule_dec400_addr = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_dec400_addr;
      input_para.submodule_L2Cache_addr = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_L2Cache_addr;
      input_para.submodule_MMU_addr[0] = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_MMU_addr[0];
      input_para.submodule_MMU_addr[1] = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_MMU_addr[1];
      input_para.submodule_axife_addr[0] = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_axife_addr[0];
      input_para.submodule_axife_addr[1] = vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_axife_addr[1];
      input_para.config_status_cmdbuf_id = vcmd_manager[input_para.module_type][0]->status_cmdbuf_id;
      input_para.vcmd_hw_version_id = vcmd_manager[input_para.module_type][0]->hw_version_id;
      input_para.vcmd_core_num = vcmd_type_core_num[input_para.module_type];
    }
    else
    {
      input_para.submodule_main_addr = 0xffff;
      input_para.submodule_dec400_addr = 0xffff;
      input_para.submodule_L2Cache_addr = 0xffff;
      input_para.submodule_MMU_addr[0] = 0xffff;
      input_para.submodule_MMU_addr[1] = 0xffff;
      input_para.submodule_axife_addr[0] = 0xffff;
      input_para.submodule_axife_addr[1] = 0xffff;
      input_para.config_status_cmdbuf_id = 0;
      input_para.vcmd_core_num = 0;
      input_para.vcmd_hw_version_id =HW_ID_1_0_C;
    }
    copy_to_user((struct config_parameter*)arg,&input_para,sizeof(struct config_parameter));
    break;
  }
  case HANTRO_IOCH_RESERVE_CMDBUF:
  {
    int ret;
    struct exchange_parameter input_para;
    copy_from_user(&input_para,(struct exchange_parameter*)arg,sizeof(struct exchange_parameter));
    ret = reserve_cmdbuf(filp,&input_para);
    if (ret == 0)
      copy_to_user((struct exchange_parameter*)arg,&input_para,sizeof(struct exchange_parameter));
    PDEBUG("filp=%px, VCMD Reserve CMDBUF [%d] remain %d \n", (void *)filp, input_para.cmdbuf_id,cmdbuf_used_residual);
    return ret;
  }

  case HANTRO_IOCH_LINK_RUN_CMDBUF:
  {
    struct exchange_parameter input_para;
    long retVal;
    copy_from_user(&input_para,(struct exchange_parameter*)arg,sizeof(struct exchange_parameter));

    PDEBUG("filp=%px,VCMD link and run cmdbuf,[%d] \n",(void *)filp, input_para.cmdbuf_id);
    /* Keep the device powered while a cmdbuf is in flight; balanced by the
     * autosuspend put in HANTRO_IOCH_RELEASE_CMDBUF. */
    pm_runtime_resume_and_get(dev);
    if (process_manager_obj)
      process_manager_obj->pm_count++;
    retVal = link_and_run_cmdbuf(filp,&input_para);
    copy_to_user((struct exchange_parameter*)arg,&input_para,sizeof(struct exchange_parameter));
    return retVal;
    break;
  }

  case HANTRO_IOCH_WAIT_CMDBUF:
  {

    u16 cmdbuf_id;
    unsigned int tmp;
    u32 irq_status_ret=0;
    __get_user(cmdbuf_id, (u16*)arg);
    /*high 16 bits are core id, low 16 bits are cmdbuf_id*/

    PDEBUG("filp=%px,VCMD wait for CMDBUF finishing. %d,\n",filp,cmdbuf_id);

    //TODO
    tmp = wait_cmdbuf_ready(filp,cmdbuf_id,&irq_status_ret);
    PDEBUG("filp=%px,VCMD wait for CMDBUF finished. %d,\n",filp,cmdbuf_id);
    /* On success the same u16 slot is reused to return the IRQ status. */
    cmdbuf_id=(u16)irq_status_ret;
    if (tmp==0)
    {
      __put_user(cmdbuf_id, (u16 *)arg);
      return tmp;//return core_id
    }
    else
    {
      return -1;
    }

    break;
  }
  case HANTRO_IOCH_RELEASE_CMDBUF:
  {
    u16 cmdbuf_id;
    __get_user(cmdbuf_id, (u16*)arg);
    /*16 bits are cmdbuf_id*/

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
    if (process_manager_obj)
      process_manager_obj->pm_count--;
    release_cmdbuf(filp,cmdbuf_id);
    PDEBUG("filp=%px,VCMD release CMDBUF ,%d,remain %d \n",filp,cmdbuf_id,cmdbuf_used_residual);
    return 0;
    break;
  }
  case HANTRO_IOCH_POLLING_CMDBUF:
  {
    /* Manually run the ISR for a core (polling mode / missing interrupt). */
    u16 core_id;
    __get_user(core_id, (u16*)arg);
    /*16 bits are cmdbuf_id*/
    if(core_id>=total_vcmd_core_num)
      return -1;
    hantrovcmd_isr(core_id,&hantrovcmd_data[core_id]);
    return 0;
    break;
  }
  default:
  {
#ifdef HANTROMMU_SUPPORT
    if(_IOC_TYPE(cmd) == HANTRO_IOC_MMU)
    {
      pm_runtime_resume_and_get(dev);
      long retval = MMUIoctl(cmd, filp, arg, mmu_hwregs);
      pm_runtime_mark_last_busy(dev);
      pm_runtime_put_autosuspend(dev);
      return retval;
    }
#endif
  }
  }
  return 0;
}
/**********************************************************************************************************\
*process manager object management
\***********************************************************************************************************/
/* Allocate and zero one per-open-file process_manager_obj; returns NULL on
 * allocation failure.  Caller owns the object (freed by
 * free_process_manager_obj()). */
static struct process_manager_obj* create_process_manager_obj(void)
{
  struct process_manager_obj* process_manager_obj=NULL;
  process_manager_obj=vmalloc(sizeof(struct process_manager_obj));
  if(process_manager_obj==NULL)
  {
    PDEBUG ("%s\n","vmalloc for process_manager_obj fail!");
    return process_manager_obj;
  }
  memset(process_manager_obj,0,sizeof(struct process_manager_obj));
  return process_manager_obj;
}

static void free_process_manager_obj( struct process_manager_obj* process_manager_obj)
{
  if(process_manager_obj==NULL)
  {
    PDEBUG ("%s\n","free_process_manager_obj NULL");
return; + } + //free current cmdbuf_obj + vfree(process_manager_obj); + return; +} + +static bi_list_node* create_process_manager_node(void) +{ + bi_list_node* current_node=NULL; + struct process_manager_obj* process_manager_obj=NULL; + + process_manager_obj=create_process_manager_obj(); + if(process_manager_obj==NULL) + { + PDEBUG ("%s\n","create_process_manager_obj fail!"); + return NULL; + } + process_manager_obj->total_exe_time = 0; + process_manager_obj->pm_count = 0; + spin_lock_init(&process_manager_obj->spinlock); + init_waitqueue_head(&process_manager_obj->wait_queue); + current_node=bi_list_create_node(); + if(current_node==NULL) + { + PDEBUG ("%s\n","bi_list_create_node fail!"); + free_process_manager_obj(process_manager_obj); + return NULL; + } + current_node->data = (void*)process_manager_obj; + return current_node; +} +static void free_process_manager_node(bi_list_node* process_node) +{ + struct process_manager_obj* process_manager_obj=NULL; + if(process_node==NULL) + { + PDEBUG ("%s\n","free_process_manager_node NULL"); + return; + } + process_manager_obj = (struct process_manager_obj*)process_node->data; + //free struct process_manager_obj + free_process_manager_obj (process_manager_obj); + //free current process_manager_obj entity. 
+ bi_list_free_node(process_node); + return; +} + +static long release_process_node_cleanup(bi_list* list) +{ + bi_list_node* new_process_node=NULL; + + while(1) + { + new_process_node=list->head; + if(new_process_node==NULL) + break; + //remove node from list + bi_list_remove_node(list,new_process_node); + //remove node from list + free_process_manager_node(new_process_node); + } + return 0; +} + +static void create_kernel_process_manager(void) +{ + bi_list_node* process_manager_node; + struct process_manager_obj* process_manager_obj=NULL; + process_manager_node = create_process_manager_node(); + process_manager_obj = (struct process_manager_obj*)process_manager_node->data; + process_manager_obj->filp = NULL; + bi_list_insert_node_tail(&global_process_manager,process_manager_node); +} + +/* Update the last JMP cmd in cmdbuf_ojb in order to jump to next_cmdbuf_obj. */ +static void cmdbuf_update_jmp_cmd(int hw_version_id, + struct cmdbuf_obj *cmdbuf_obj, + struct cmdbuf_obj *next_cmdbuf_obj, + int jmp_IE_1) { + u32 *jmp_addr; + u32 operation_code; + + if(!cmdbuf_obj) + return; + + if(cmdbuf_obj->has_end_cmdbuf==0) + { + //need to link, current cmdbuf link to next cmdbuf + jmp_addr = cmdbuf_obj->cmdbuf_virtualAddress + (cmdbuf_obj->cmdbuf_size/4); + if (!next_cmdbuf_obj) { + operation_code = *(jmp_addr-4); + operation_code >>=16; + operation_code <<=16; + *(jmp_addr-4)=(u32)(operation_code & ~JMP_RDY_1); + } else { + if(hw_version_id > HW_ID_1_0_C ) + { + //set next cmdbuf id + *(jmp_addr-1) = next_cmdbuf_obj->cmdbuf_id; + } + if (mmu_enable) { + if(sizeof(size_t) == 8) { + *(jmp_addr-2)=(u32)((u64)(next_cmdbuf_obj->mmu_cmdbuf_busAddress)>>32); + } else { + *(jmp_addr-2)=0; + } + *(jmp_addr-3)=(u32)(next_cmdbuf_obj->mmu_cmdbuf_busAddress); + } else { + if(sizeof(size_t) == 8) { + *(jmp_addr-2)=(u32)((u64)(next_cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr)>>32); + } else { + *(jmp_addr-2)=0; + } + *(jmp_addr-3)=(u32)(next_cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr); + 
} + operation_code = *(jmp_addr-4); + operation_code >>=16; + operation_code <<=16; + *(jmp_addr-4)=(u32)(operation_code |JMP_RDY_1|jmp_IE_1|((next_cmdbuf_obj->cmdbuf_size+7)/8)); + } + +#ifdef VCMD_DEBUG_INTERNAL + { + u32 i; + pr_info("vcmd link, last cmdbuf content\n"); + for(i=cmdbuf_obj->cmdbuf_size/4 -8;icmdbuf_size/4;i++) + { + pr_info("current linked cmdbuf data %d =0x%x\n",i,*(cmdbuf_obj->cmdbuf_virtualAddress+i)); + } + } +#endif + } +} + + +/* delink given cmd buffer (cmdbuf_node) and remove it from list. + Also modify the last JMP of buf P to point to cmdbuf N. + Used when a process is terminated but there are pending cmd bufs in vmcd list. + E.g., + before: + + L->L->...->P->X->N-> ... ->L + ^ ^ ^ + head cmdbuf_node tail + + end: + + L->L->...->P->N-> ... ->L + ^ ^ + head tail + + Return: pointer to N or NULL if N doesn't exist. + */ +void vcmd_delink_rm_cmdbuf(struct hantrovcmd_dev *dev, bi_list_node* cmdbuf_node) +{ + bi_list *list = &dev->list_manager; + struct cmdbuf_obj* cmdbuf_obj = (struct cmdbuf_obj*)cmdbuf_node->data; + bi_list_node* prev = cmdbuf_node->previous; + bi_list_node* next = cmdbuf_node->next; + + PDEBUG("Delink and remove cmdbuf [%d] from vcmd list.\n", cmdbuf_obj->cmdbuf_id); + if (prev) { + PDEBUG("prev cmdbuf [%d].\n", ((struct cmdbuf_obj*)prev->data)->cmdbuf_id); + } else { + PDEBUG("NO prev cmdbuf.\n"); + } + if (next) { + PDEBUG("next cmdbuf [%d].\n", ((struct cmdbuf_obj*)next->data)->cmdbuf_id); + } else { + PDEBUG("NO next cmdbuf.\n"); + } + + bi_list_remove_node(list, cmdbuf_node); + global_cmdbuf_node[cmdbuf_obj->cmdbuf_id] = NULL; + free_cmdbuf_node(cmdbuf_node); + + cmdbuf_update_jmp_cmd(dev->hw_version_id, prev ? prev->data : NULL, next ? 
next->data : NULL, + dev->duration_without_int > INT_MIN_SUM_OF_IMAGE_SIZE); +} + +static int hantrovcmd_open(struct inode *inode, struct file *filp) +{ + int result = 0; + struct hantrovcmd_dev *dev = hantrovcmd_data; + bi_list_node* process_manager_node; + unsigned long flags; + struct process_manager_obj* process_manager_obj=NULL; + + filp->private_data = NULL; + process_manager_node = create_process_manager_node(); + if(process_manager_node== NULL) + return -1; + process_manager_obj = (struct process_manager_obj*)process_manager_node->data; + process_manager_obj->filp = filp; + spin_lock_irqsave(&vcmd_process_manager_lock, flags); + bi_list_insert_node_tail(&global_process_manager,process_manager_node); + spin_unlock_irqrestore(&vcmd_process_manager_lock, flags); + filp->private_data = process_manager_node->data; + + PDEBUG("process node %px for filp opened %px\n", (void *)process_manager_node, (void *)filp); + return result; +} + + +/* hantrovcmd_flush -- blocking wait until cmdbuf done for memory can be released + * To fix memory free but hardware in-using issue, caused in kill process which + * vidmem file ops release may be before this drivers file ops release or waiting + * cmd done. + * This flush ops sequence depends on devices open sequence in user mode drivers. + * So check user mode driver opened sequence as open this dev then dev/vidmem. 
+ */ +static int hantrovcmd_flush(struct file *filp, fl_owner_t id) +{ + struct hantrovcmd_dev *dev = hantrovcmd_data; + u32 core_id = 0; + bi_list_node* new_cmdbuf_node=NULL; + struct cmdbuf_obj* cmdbuf_obj_temp=NULL; + unsigned long flags; + + PDEBUG("dev flushing for process %px\n", (void *)filp); + down(&vcmd_reserve_cmdbuf_sem[dev->vcmd_core_cfg.sub_module_type]); //should be wait,do not use interruptible interface + + for (core_id = 0;core_id < total_vcmd_core_num; core_id++) + { + if((&dev[core_id])==NULL) + continue; + spin_lock_irqsave(dev[core_id].spinlock, flags); + new_cmdbuf_node=dev[core_id].list_manager.head; + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj_temp=(struct cmdbuf_obj*)new_cmdbuf_node->data; + if(cmdbuf_obj_temp->cmdbuf_id > 1) + { + PDEBUG("Process %px is flush: checking cmdbuf %d (done %d,linked %d wstat %d) process %px. remain %d\n", + filp, cmdbuf_obj_temp->cmdbuf_id, cmdbuf_obj_temp->cmdbuf_run_done,cmdbuf_obj_temp->cmdbuf_data_linked, + dev[core_id].working_state, + cmdbuf_obj_temp->filp,cmdbuf_used_residual); + } + if (dev[core_id].hwregs && (cmdbuf_obj_temp->filp == filp)) + { + if(cmdbuf_obj_temp->cmdbuf_data_linked==1 && + !cmdbuf_obj_temp->cmdbuf_run_done && dev[core_id].working_state==WORKING_STATE_WORKING) + { + u32 irq_status_ret; + spin_unlock_irqrestore(dev[core_id].spinlock, flags); + + PDEBUG(" start wait event of cmdbuf[%d]\n",cmdbuf_obj_temp->cmdbuf_id); + if(wait_event_timeout(*dev[core_id].wait_queue, + check_cmdbuf_irq(dev,cmdbuf_obj_temp,&irq_status_ret),msecs_to_jiffies(1000) ) + ) + { + spin_lock_irqsave(dev[core_id].spinlock, flags); + PDEBUG(" ## wait got cmdbuf[%d] done %d \n",cmdbuf_obj_temp->cmdbuf_id,cmdbuf_obj_temp->cmdbuf_run_done); + } + else + { + spin_lock_irqsave(dev[core_id].spinlock, flags); + PDEBUG(" ## wait timeout cmdbuf[%d] %d \n",cmdbuf_obj_temp->cmdbuf_id,cmdbuf_obj_temp->cmdbuf_run_done); + } + + } + + } + new_cmdbuf_node = new_cmdbuf_node->next; + } + + 
spin_unlock_irqrestore(dev[core_id].spinlock, flags); + + } + up(&vcmd_reserve_cmdbuf_sem[dev->vcmd_core_cfg.sub_module_type]); + return 0; +} + +static int __hantrovcmd_release(struct inode *inode, struct file *filp) +{ + struct hantrovcmd_dev *dev = hantrovcmd_data; + u32 core_id = 0; + u32 release_cmdbuf_num=0; + bi_list_node* new_cmdbuf_node=NULL; + struct cmdbuf_obj* cmdbuf_obj_temp=NULL; + bi_list_node* process_manager_node; + struct process_manager_obj* process_manager_obj=NULL; + int vcmd_aborted = 0; // vcmd is aborted in this function + struct cmdbuf_obj* restart_cmdbuf = NULL; + + unsigned long flags; + long retVal=0; + + PDEBUG("dev closed for process %px\n", (void *)filp); + down(&vcmd_reserve_cmdbuf_sem[dev->vcmd_core_cfg.sub_module_type]); //should be wait,do not use interruptible interface + + for (core_id = 0;core_id < total_vcmd_core_num; core_id++) + { + if((&dev[core_id])==NULL) + continue; + spin_lock_irqsave(dev[core_id].spinlock, flags); + new_cmdbuf_node=dev[core_id].list_manager.head; + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj_temp=(struct cmdbuf_obj*)new_cmdbuf_node->data; + if(cmdbuf_obj_temp->cmdbuf_id > 1) + { + PDEBUG("Process %px is releasing: checking cmdbuf %d (done %d,linked %d wstat %d) process %px. 
remain %d\n", + filp, cmdbuf_obj_temp->cmdbuf_id, cmdbuf_obj_temp->cmdbuf_run_done,cmdbuf_obj_temp->cmdbuf_data_linked, + dev[core_id].working_state, + cmdbuf_obj_temp->filp,cmdbuf_used_residual); + } + if (dev[core_id].hwregs && (cmdbuf_obj_temp->filp == filp)) + { + if(cmdbuf_obj_temp->cmdbuf_run_done) + { + cmdbuf_obj_temp->cmdbuf_need_remove=1; + retVal=release_cmdbuf_node(&dev[core_id].list_manager,new_cmdbuf_node); + if(retVal==1) + cmdbuf_obj_temp->process_manager_obj = NULL; + } + else if(cmdbuf_obj_temp->cmdbuf_data_linked==0) + { + cmdbuf_obj_temp->cmdbuf_data_linked = 1; + cmdbuf_obj_temp->cmdbuf_run_done=1; + cmdbuf_obj_temp->cmdbuf_need_remove=1; + retVal=release_cmdbuf_node(&dev[core_id].list_manager,new_cmdbuf_node); + if(retVal==1) + cmdbuf_obj_temp->process_manager_obj = NULL; + } + else if(cmdbuf_obj_temp->cmdbuf_data_linked==1 && dev[core_id].working_state==WORKING_STATE_IDLE) + { + vcmd_delink_rm_cmdbuf(&dev[core_id], new_cmdbuf_node); + if(restart_cmdbuf == cmdbuf_obj_temp) + restart_cmdbuf = new_cmdbuf_node->next ? 
new_cmdbuf_node->next->data : NULL; + } + else if(cmdbuf_obj_temp->cmdbuf_data_linked==1 && dev[core_id].working_state==WORKING_STATE_WORKING) + { + bi_list_node* last_cmdbuf_node = NULL; + bi_list_node* done_cmdbuf_node = NULL; + int abort_cmdbuf_id; + int loop_count = 0; + u32 irq_status_ret; + + spin_unlock_irqrestore(dev[core_id].spinlock, flags); + if(wait_event_timeout(*dev[core_id].wait_queue, + check_cmdbuf_irq(dev,cmdbuf_obj_temp,&irq_status_ret),msecs_to_jiffies(1000)) + ) + { + spin_lock_irqsave(dev[core_id].spinlock, flags); + PDEBUG(" ## wait got cmdbuf[%d] done %d \n",cmdbuf_obj_temp->cmdbuf_id,cmdbuf_obj_temp->cmdbuf_run_done); + cmdbuf_obj_temp->cmdbuf_need_remove=1; + retVal=release_cmdbuf_node(&dev[core_id].list_manager,new_cmdbuf_node); + if(retVal==1) + cmdbuf_obj_temp->process_manager_obj = NULL; + } + else + { + spin_lock_irqsave(dev[core_id].spinlock, flags); + //abort the vcmd and wait + PDEBUG("Abort due to linked cmdbuf %d of current process.\n", cmdbuf_obj_temp->cmdbuf_id); + #ifdef VCMD_DEBUG_INTERNAL + printk_vcmd_register_debug((const void *)dev[core_id].hwregs, "Before trigger to 0"); + #endif + // disable abort interrupt + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_START_TRIGGER,0); + vcmd_aborted = 1; + software_triger_abort = 1; + #ifdef VCMD_DEBUG_INTERNAL + printk_vcmd_register_debug((const void *)dev[core_id].hwregs,"After trigger to 0"); + #endif + // wait vcmd core aborted and vcmd enters IDLE mode. 
+ while (vcmd_get_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_WORK_STATE)) { + loop_count++; + if (!(loop_count % 10)) { + u32 irq_status = vcmd_read_reg((const void *)dev[core_id].hwregs, VCMD_REGISTER_INT_STATUS_OFFSET); + pr_err("hantrovcmd: expected idle state, but irq status = 0x%0x\n", irq_status); + pr_err("hantrovcmd: vcmd current status is %d\n", vcmd_get_register_value((const void *)dev[core_id].hwregs, dev[core_id].reg_mirror, HWIF_VCMD_WORK_STATE)); + } + mdelay(10); // wait 10ms + if (loop_count > 100) { // too long + pr_err("hantrovcmd: too long before vcmd core to IDLE state\n"); + process_manager_obj = (struct process_manager_obj*)filp->private_data; + if (process_manager_obj) + { + while(process_manager_obj->pm_count > 0) + { + pm_runtime_mark_last_busy(&dev[0].pdev->dev); + pm_runtime_put_autosuspend(&dev[0].pdev->dev); + process_manager_obj->pm_count--; + } + } + spin_unlock_irqrestore(dev[core_id].spinlock, flags); + up(&vcmd_reserve_cmdbuf_sem[dev->vcmd_core_cfg.sub_module_type]); + return -ERESTARTSYS; + } + } + dev[core_id].working_state = WORKING_STATE_IDLE; + // clear interrupt & restore abort_e + if (vcmd_get_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_IRQ_ABORT)) { + PDEBUG("Abort interrupt triggered, now clear it to avoid abort int...\n"); + vcmd_write_reg((const void *)dev[core_id].hwregs, VCMD_REGISTER_INT_STATUS_OFFSET, 0x1<<4); + PDEBUG("Now irq status = 0x%0x.\n", vcmd_read_reg((const void *)dev[core_id].hwregs, VCMD_REGISTER_INT_STATUS_OFFSET)); + } + + abort_cmdbuf_id = vcmd_get_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_CMDBUF_EXECUTING_ID); + PDEBUG("Abort when executing cmd buf %d.\n", abort_cmdbuf_id); + dev[core_id].sw_cmdbuf_rdy_num = 0; + dev[core_id].duration_without_int = 0; + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_EXE_CMDBUF_COUNT,0); + 
vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_RDY_CMDBUF_COUNT,0); + + /* Mark cmdbuf_run_done to 1 for all the cmd buf executed. */ + done_cmdbuf_node = dev[core_id].list_manager.head; + while (done_cmdbuf_node) { + if (!((struct cmdbuf_obj*)done_cmdbuf_node->data)->cmdbuf_run_done) { + ((struct cmdbuf_obj*)done_cmdbuf_node->data)->cmdbuf_run_done = 1; + ((struct cmdbuf_obj*)done_cmdbuf_node->data)->cmdbuf_data_linked = 0; + PDEBUG("Set cmdbuf [%d] cmdbuf_run_done to 1.\n", ((struct cmdbuf_obj*)done_cmdbuf_node->data)->cmdbuf_id); + } + if (((struct cmdbuf_obj*)done_cmdbuf_node->data)->cmdbuf_id == abort_cmdbuf_id) + break; + done_cmdbuf_node = done_cmdbuf_node->next; + } + if (cmdbuf_obj_temp->cmdbuf_run_done) { + /* current cmdbuf is in fact has been executed, but due to interrupt is not triggered, the status is not updated. + Just delink and remove it from the list. */ + if (done_cmdbuf_node && done_cmdbuf_node->data) { + PDEBUG("done_cmdbuf_node is cmdbuf [%d].\n", ((struct cmdbuf_obj*)done_cmdbuf_node->data)->cmdbuf_id); + } + done_cmdbuf_node = done_cmdbuf_node->next; + if (done_cmdbuf_node) + restart_cmdbuf = (struct cmdbuf_obj*)done_cmdbuf_node->data; + if (restart_cmdbuf) { + PDEBUG("Set restart cmdbuf [%d] via if.\n", restart_cmdbuf->cmdbuf_id); + } + } else { + last_cmdbuf_node = new_cmdbuf_node; + /* cmd buf num from aborted cmd buf to current cmdbuf_obj_temp */ + if (cmdbuf_obj_temp->cmdbuf_id != abort_cmdbuf_id) { + last_cmdbuf_node = new_cmdbuf_node->previous; + + while (last_cmdbuf_node && + ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_id != abort_cmdbuf_id) { + restart_cmdbuf = (struct cmdbuf_obj*)last_cmdbuf_node->data; + last_cmdbuf_node = last_cmdbuf_node->previous; + dev[core_id].sw_cmdbuf_rdy_num++; + dev[core_id].duration_without_int += restart_cmdbuf->executing_time; + PDEBUG("Keep valid cmdbuf [%d] in the list.\n", restart_cmdbuf->cmdbuf_id); + } + } + if (restart_cmdbuf) { + 
PDEBUG("Set restart cmdbuf [%d] via else.\n", restart_cmdbuf->cmdbuf_id); + } + } + + // remove first linked cmdbuf from list + vcmd_delink_rm_cmdbuf(&dev[core_id], new_cmdbuf_node); + } + + } + software_triger_abort = 0; + release_cmdbuf_num++; + PDEBUG("vc8000e : release reserved cmdbuf,remain %d\n",cmdbuf_used_residual); + } + else if (vcmd_aborted && !cmdbuf_obj_temp->cmdbuf_run_done) { + /* VCMD is aborted, need to re-calculate the duration_without_int */ + if (!restart_cmdbuf) + restart_cmdbuf = cmdbuf_obj_temp; /* first cmdbuf to be restarted */ + dev[core_id].duration_without_int += cmdbuf_obj_temp->executing_time; + dev[core_id].sw_cmdbuf_rdy_num++; + } + new_cmdbuf_node = new_cmdbuf_node->next; + } + if (restart_cmdbuf && restart_cmdbuf->core_id == core_id) { + u32 irq_status1, irq_status2; + PDEBUG("Restart from cmdbuf [%d] after aborting.\n", restart_cmdbuf->cmdbuf_id); + + irq_status1 = vcmd_read_reg((const void *)dev[core_id].hwregs, VCMD_REGISTER_INT_STATUS_OFFSET); + vcmd_write_reg((const void *)dev[core_id].hwregs, VCMD_REGISTER_INT_STATUS_OFFSET, irq_status1); + irq_status2 = vcmd_read_reg((const void *)dev[core_id].hwregs, VCMD_REGISTER_INT_STATUS_OFFSET); + PDEBUG("Clear irq status from 0x%0x -> 0x%0x\n", irq_status1, irq_status2); + if (mmu_enable) { + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror, HWIF_VCMD_EXECUTING_CMD_ADDR, + (u32)(restart_cmdbuf->mmu_cmdbuf_busAddress)); + if(sizeof(size_t) == 8) { + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror, HWIF_VCMD_EXECUTING_CMD_ADDR_MSB,(u32)((u64)(restart_cmdbuf->mmu_cmdbuf_busAddress)>>32)); + } else { + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror, HWIF_VCMD_EXECUTING_CMD_ADDR_MSB, 0); + } + } else { + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror, HWIF_VCMD_EXECUTING_CMD_ADDR,(u32)(restart_cmdbuf->cmdbuf_busAddress-base_ddr_addr)); + 
if(sizeof(size_t) == 8) { + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror, HWIF_VCMD_EXECUTING_CMD_ADDR_MSB,(u32)((u64)(restart_cmdbuf->cmdbuf_busAddress-base_ddr_addr)>>32)); + } else { + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror, HWIF_VCMD_EXECUTING_CMD_ADDR_MSB, 0); + } + } + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_EXE_CMDBUF_COUNT,0); + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_EXE_CMDBUF_LENGTH,(u32)((restart_cmdbuf->cmdbuf_size+7)/8)); + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_CMDBUF_EXECUTING_ID,restart_cmdbuf->cmdbuf_id); + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_RDY_CMDBUF_COUNT,dev->sw_cmdbuf_rdy_num); +#ifdef VCMD_DEBUG_INTERNAL + printk_vcmd_register_debug((const void *)dev[core_id].hwregs, "before restart"); +#endif + vcmd_write_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_START_TRIGGER,1); + + PDEBUG("Restart from cmdbuf [%d] dev register sw_cmdbuf_rdy_num is %d \n", restart_cmdbuf->cmdbuf_id, + vcmd_get_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_RDY_CMDBUF_COUNT)); + + PDEBUG("Restart from cmdbuf [%d] after aborting: start trigger = %d.\n", restart_cmdbuf->cmdbuf_id, + vcmd_get_register_value((const void *)dev[core_id].hwregs,dev[core_id].reg_mirror,HWIF_VCMD_START_TRIGGER)); + PDEBUG("dev state from %d -> WORKING.\n", dev[core_id].working_state); + dev[core_id].working_state = WORKING_STATE_WORKING; +#ifdef VCMD_DEBUG_INTERNAL + printk_vcmd_register_debug((const void *)dev[core_id].hwregs, "after restart"); +#endif + } else { + PDEBUG("No more command buffer to be restarted!\n"); + } + spin_unlock_irqrestore(dev[core_id].spinlock, flags); + // VCMD aborted but not restarted, nedd to 
wake up + if (vcmd_aborted && !restart_cmdbuf) + wake_up_all(dev[core_id].wait_queue); + } + if(release_cmdbuf_num) + wake_up_all(&vcmd_cmdbuf_memory_wait); + + // free reserved but unlinkd node + // Here is powerfull free all this process reserved/linked cmdbuf + free_cmdbuf_not_linked_by_flip(filp); + + spin_lock_irqsave(&vcmd_process_manager_lock, flags); + process_manager_node = global_process_manager.head; + while(1) + { + if(process_manager_node == NULL) + break; + process_manager_obj = (struct process_manager_obj*)process_manager_node->data; + if(process_manager_obj->filp == filp) + break; + process_manager_node = process_manager_node->next; + } + if (process_manager_obj) + { + while(process_manager_obj->pm_count > 0) + { + pm_runtime_mark_last_busy(&dev[0].pdev->dev); + pm_runtime_put_autosuspend(&dev[0].pdev->dev); + process_manager_obj->pm_count--; + } + } + //remove node from list + PDEBUG("process node %px for filp to be removed: %px remain %d\n", (void *)process_manager_node, (void *)process_manager_obj->filp,cmdbuf_used_residual); + bi_list_remove_node(&global_process_manager,process_manager_node); + spin_unlock_irqrestore(&vcmd_process_manager_lock, flags); + free_process_manager_node(process_manager_node); + up(&vcmd_reserve_cmdbuf_sem[dev->vcmd_core_cfg.sub_module_type]); + return 0; +} + +static int hantrovcmd_release(struct inode *inode, struct file *filp) +{ + int i; + int ret = 0; + ret = __hantrovcmd_release(inode,filp); +#ifdef HANTROMMU_SUPPORT + for(i = 0; i < total_vcmd_core_num; i++) + { + if (mmu_hwregs[i][0] != NULL) + MMURelease(filp,mmu_hwregs[i][0]); + } +#endif + return ret; +} + +static bool hantroenc_cmdbuf_range(size_t addr, size_t size); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 4, 0) +static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) { + return pgprot_noncached(vma_prot); + } else if (file->f_flags & O_SYNC) { + return 
pgprot_writecombine(vma_prot); + } + + return vma_prot; +} +#endif +static int mmap_cmdbuf_mem(struct file *file, struct vm_area_struct *vma) +{ + size_t size = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + + /* Does it even fit in phys_addr_t? */ + if (offset >> PAGE_SHIFT != vma->vm_pgoff) + return -EINVAL; + + /* It's illegal to wrap around the end of the physical address space. */ + if (offset + (phys_addr_t)size - 1 < offset) + return -EINVAL; + + + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, + size, + vma->vm_page_prot); + + /* Remap-pfn-range will mark the range VM_IO */ + if (remap_pfn_range(vma, + vma->vm_start, + vma->vm_pgoff, + size, + vma->vm_page_prot)) + { + return -EAGAIN; + } + + return 0; +} + +static int mmap_mem(struct file *file, struct vm_area_struct *vma) +{ + size_t size = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + + if (hantroenc_cmdbuf_range(offset, size)) + { + return mmap_cmdbuf_mem(file, vma); + } + else + { + return -EINVAL; + /*TODO: need check if not need enc,in this condition*/ + //return allocator_mmap(file,vma); + } +} + +/* VFS methods */ +static struct file_operations hantrovcmd_fops = { + .owner= THIS_MODULE, + .open = hantrovcmd_open, + .flush = hantrovcmd_flush, + .release = hantrovcmd_release, + .unlocked_ioctl = hantrovcmd_ioctl, + .mmap = mmap_mem, + .fasync = NULL, +}; + +static u32 vcmd_release_AXIFE_IO(void) +{ +#ifdef HANTROAXIFE_SUPPORT + int i=0, j=0; + + for(i=0; idev, CMDBUF_POOL_TOTAL_SIZE*2 + CMDBUF_VCMD_REGISTER_TOTAL_SIZE, + vcmd_buf_mem_pool.virtualAddress, vcmd_buf_mem_pool.busAddress); + + return 0; +} + +static u32 MMU_Kernel_unmap(void) +{ +#ifdef HANTROMMU_SUPPORT + struct kernel_addr_desc addr; + + if (vcmd_buf_mem_pool.virtualAddress) { + if (mmu_enable) { + addr.bus_address = vcmd_buf_mem_pool.busAddress - gBaseDDRHw; + addr.size = vcmd_buf_mem_pool.size; + 
MMUKernelMemNodeUnmap(&addr); + } + } + if (vcmd_status_buf_mem_pool.virtualAddress) { + if (mmu_enable) { + addr.bus_address = vcmd_status_buf_mem_pool.busAddress - gBaseDDRHw; + addr.size = vcmd_status_buf_mem_pool.size; + MMUKernelMemNodeUnmap(&addr); + } + } + if (vcmd_registers_mem_pool.virtualAddress) { + if (mmu_enable) { + addr.bus_address = vcmd_registers_mem_pool.busAddress - gBaseDDRHw; + addr.size = vcmd_registers_mem_pool.size; + MMUKernelMemNodeUnmap(&addr); + } + } +#endif + + return 0; +} + +static void vcmd_link_cmdbuf(struct hantrovcmd_dev *dev,bi_list_node* last_linked_cmdbuf_node) +{ + bi_list_node* new_cmdbuf_node=NULL; + bi_list_node* next_cmdbuf_node=NULL; + struct cmdbuf_obj* cmdbuf_obj=NULL; + struct cmdbuf_obj* next_cmdbuf_obj=NULL; + u32 * jmp_addr=NULL; + u32 operation_code; + new_cmdbuf_node = last_linked_cmdbuf_node; + //for the first cmdbuf. + if(new_cmdbuf_node!=NULL) + { + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_data_linked==0) ) + { + dev->sw_cmdbuf_rdy_num ++; + cmdbuf_obj->cmdbuf_data_linked=1; + dev->duration_without_int = 0; + if(cmdbuf_obj->has_end_cmdbuf==0) + { + if(cmdbuf_obj->no_normal_int_cmdbuf==1) + { + dev->duration_without_int = cmdbuf_obj->executing_time; + //maybe nop is modified, so write back. 
+ if(dev->duration_without_int>=INT_MIN_SUM_OF_IMAGE_SIZE) + { + jmp_addr = cmdbuf_obj->cmdbuf_virtualAddress + (cmdbuf_obj->cmdbuf_size/4); + operation_code = *(jmp_addr-4); + operation_code = JMP_IE_1|operation_code; + *(jmp_addr-4) = operation_code; + dev->duration_without_int = 0; + } + + } + } + } + } + while(1) + { + if(new_cmdbuf_node==NULL) + break; + if(new_cmdbuf_node->next==NULL) + break; + next_cmdbuf_node = new_cmdbuf_node->next; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + next_cmdbuf_obj = (struct cmdbuf_obj*)next_cmdbuf_node->data; + if(cmdbuf_obj->has_end_cmdbuf==0 && !next_cmdbuf_obj->cmdbuf_run_done) + { + //need to link, current cmdbuf link to next cmdbuf + jmp_addr = cmdbuf_obj->cmdbuf_virtualAddress + (cmdbuf_obj->cmdbuf_size/4); + if(dev->hw_version_id > HW_ID_1_0_C ) + { + //set next cmdbuf id + *(jmp_addr-1) = next_cmdbuf_obj->cmdbuf_id; + } + if (mmu_enable) { + if(sizeof(size_t) == 8) { + *(jmp_addr-2)=(u32)((u64)(next_cmdbuf_obj->mmu_cmdbuf_busAddress)>>32); + } else { + *(jmp_addr-2)=0; + } + *(jmp_addr-3)=(u32)(next_cmdbuf_obj->mmu_cmdbuf_busAddress); + pr_debug("vcmd_link_cmdbuf: next_cmdbuf_obj->mmu_cmdbuf_busAddress=0x%08x\n", next_cmdbuf_obj->mmu_cmdbuf_busAddress); + } else { + if(sizeof(size_t) == 8) { + *(jmp_addr-2)=(u32)((u64)(next_cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr)>>32); + } else { + *(jmp_addr-2)=0; + } + *(jmp_addr-3)=(u32)(next_cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr); + } + operation_code = *(jmp_addr-4); + operation_code >>=16; + operation_code <<=16; + *(jmp_addr-4)=(u32)(operation_code |JMP_RDY_1|((next_cmdbuf_obj->cmdbuf_size+7)/8)); + next_cmdbuf_obj->cmdbuf_data_linked = 1; + dev->sw_cmdbuf_rdy_num ++; + //modify nop code of next cmdbuf + if(next_cmdbuf_obj->has_end_cmdbuf==0) + { + if(next_cmdbuf_obj->no_normal_int_cmdbuf==1) + { + dev->duration_without_int +=next_cmdbuf_obj->executing_time; + + //maybe we see the modified nop before abort, so need to write back. 
+ if(dev->duration_without_int>=INT_MIN_SUM_OF_IMAGE_SIZE) + { + jmp_addr = next_cmdbuf_obj->cmdbuf_virtualAddress + (next_cmdbuf_obj->cmdbuf_size/4); + operation_code = *(jmp_addr-4); + operation_code = JMP_IE_1|operation_code; + *(jmp_addr-4) = operation_code; + dev->duration_without_int = 0; + } + } + } + else + { + dev->duration_without_int = 0; + } +#ifdef VCMD_DEBUG_INTERNAL + { + u32 i; + pr_info("vcmd link, last cmdbuf content\n"); + for(i=cmdbuf_obj->cmdbuf_size/4 -8;icmdbuf_size/4;i++) + { + pr_info("current linked cmdbuf data %d =0x%x\n",i,*(cmdbuf_obj->cmdbuf_virtualAddress+i)); + } + } +#endif + } + new_cmdbuf_node = new_cmdbuf_node->next; + } + return; +} +/* delink all the cmd buffers from the cmdbuf in front of last_linked_cmdbuf_node + to head of the list. All the cmd bufs marked as X will be delinked. + E.g., + X->X->...->X->L->L-> ... ->L + ^ ^ ^ + head last_linked_cmdbuf_node tail + */ + +static void vcmd_delink_cmdbuf(struct hantrovcmd_dev *dev,bi_list_node* last_linked_cmdbuf_node) +{ + bi_list_node* new_cmdbuf_node=NULL; + struct cmdbuf_obj* cmdbuf_obj=NULL; + + new_cmdbuf_node = last_linked_cmdbuf_node; + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if(cmdbuf_obj->cmdbuf_data_linked) + { + cmdbuf_obj->cmdbuf_data_linked = 0; + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + dev->sw_cmdbuf_rdy_num=0; +} + +static void ConfigAIIXFE_MMU_BYVCMD(struct hantrovcmd_dev **device) +{ +#ifdef HANTROVCMD_ENABLE_IP_SUPPORT + u32 i = 0; + u64 address = 0; + u32 mirror_index, register_index, register_value; + u32 write_command = 0; + if(!device) return; + struct hantrovcmd_dev *dev = *device; + + mirror_index = VCMD_REGISTER_INDEX_SW_INIT_CMD0; + write_command = OPCODE_WREG|(1<<26)|(1<<16); +#ifdef HANTROAXIFE_SUPPORT + //enable AXIFE by VCMD + for(i=0; i<2; i++) + { + if(dev->vcmd_core_cfg.submodule_axife_addr[i] != 0xffff) + { + register_index = AXI_REG10_SW_FRONTEND_EN; 
+ register_value = 0x02;
+ dev->reg_mirror[mirror_index++] = write_command|(dev->vcmd_core_cfg.submodule_axife_addr[i]+register_index);
+ dev->reg_mirror[mirror_index++] = register_value;
+
+ register_index = AXI_REG11_SW_WORK_MODE;
+ register_value = 0x00;
+ dev->reg_mirror[mirror_index++] = write_command|(dev->vcmd_core_cfg.submodule_axife_addr[i]+register_index);
+ dev->reg_mirror[mirror_index++] = register_value;
+ }
+ }
+#endif
+#ifdef HANTROMMU_SUPPORT
+ //enable MMU by VCMD
+ address = GetMMUAddress();
+ pr_info("ConfigAIIXFE_MMU_BYVCMD: address = 0x%llx", address);
+ for(i=0; i<2; i++)
+ {
+ if(dev->vcmd_core_cfg.submodule_MMU_addr[i] != 0xffff)
+ {
+ register_index = MMU_REG_ADDRESS;
+ register_value = address;
+ dev->reg_mirror[mirror_index++] = write_command|(dev->vcmd_core_cfg.submodule_MMU_addr[i]+register_index);
+ dev->reg_mirror[mirror_index++] = register_value;
+
+ register_index = MMU_REG_PAGE_TABLE_ID;
+ register_value = 0x10000;
+ dev->reg_mirror[mirror_index++] = write_command|(dev->vcmd_core_cfg.submodule_MMU_addr[i]+register_index);
+ dev->reg_mirror[mirror_index++] = register_value;
+
+ register_index = MMU_REG_PAGE_TABLE_ID;
+ register_value = 0x00000;
+ dev->reg_mirror[mirror_index++] = write_command|(dev->vcmd_core_cfg.submodule_MMU_addr[i]+register_index);
+ dev->reg_mirror[mirror_index++] = register_value;
+
+ register_index = MMU_REG_CONTROL;
+ register_value = 1;
+ dev->reg_mirror[mirror_index++] = write_command|(dev->vcmd_core_cfg.submodule_MMU_addr[i]+register_index);
+ dev->reg_mirror[mirror_index++] = register_value;
+ }
+ }
+#endif
+ //END command
+ dev->reg_mirror[mirror_index++] = OPCODE_END;
+ dev->reg_mirror[mirror_index] = 0x00;
+
+ for(i=0; i<=mirror_index-VCMD_REGISTER_INDEX_SW_INIT_CMD0; i++)
+ {
+ register_index = (VCMD_REGISTER_INDEX_SW_INIT_CMD0+i)*4;
+ vcmd_write_reg((const void *)dev->hwregs, register_index, dev->reg_mirror[i+VCMD_REGISTER_INDEX_SW_INIT_CMD0]);
+ }
+#endif
+}
+static void vcmd_start(struct hantrovcmd_dev *dev,bi_list_node* first_linked_cmdbuf_node)
+{
+ struct cmdbuf_obj* cmdbuf_obj = NULL;
+
+ if(dev->working_state == WORKING_STATE_IDLE)
+ {
+ 
if((first_linked_cmdbuf_node!=NULL) && dev->sw_cmdbuf_rdy_num) + { + cmdbuf_obj = (struct cmdbuf_obj*)first_linked_cmdbuf_node->data; +#ifdef VCMD_DEBUG_INTERNAL + printk_vcmd_register_debug((const void *)dev->hwregs, "vcmd_start enters"); +#endif + //0x40 +#ifdef HANTROVCMD_ENABLE_IP_SUPPORT + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_INIT_MODE,1); //when start vcmd, first vcmd is init mode +#endif + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_AXI_CLK_GATE_DISABLE,0); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_MASTER_OUT_CLK_GATE_DISABLE,1);//this bit should be set 1 only when need to reset dec400 + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_CORE_CLK_GATE_DISABLE,0); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_ABORT_MODE,0); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_RESET_CORE,0); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_RESET_ALL,0); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_START_TRIGGER,0); + //0x48 + if(dev->hw_version_id <= HW_ID_1_0_C) + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_INTCMD_EN,0xffff); + else + { + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_JMPP_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_JMPD_EN,1); + } + + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_RESET_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_ABORT_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_CMDERR_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_TIMEOUT_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_BUSERR_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_ENDCMD_EN,1); + //0x4c + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_TIMEOUT_EN,1); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_TIMEOUT_CYCLES,0x1dcd6500); + if (mmu_enable) { + 
vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR,(u32)(cmdbuf_obj->mmu_cmdbuf_busAddress)); + pr_debug("cmdbuf_obj->mmu_cmdbuf_busAddress=0x%08x\n", (u32)cmdbuf_obj->mmu_cmdbuf_busAddress); + if(sizeof(size_t) == 8) { + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR_MSB,(u32)((u64)(cmdbuf_obj->mmu_cmdbuf_busAddress)>>32)); + } else { + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR_MSB, 0); + } + } else { + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR,(u32)(cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr)); + if(sizeof(size_t) == 8) { + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR_MSB,(u32)((u64)(cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr)>>32)); + } else { + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR_MSB, 0); + } + } + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_EXE_CMDBUF_LENGTH,(u32)((cmdbuf_obj->cmdbuf_size+7)/8)); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_RDY_CMDBUF_COUNT,dev->sw_cmdbuf_rdy_num); + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_MAX_BURST_LEN,0x10); + if(dev->hw_version_id > HW_ID_1_0_C ) + { + vcmd_write_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_CMDBUF_EXECUTING_ID,(u32)cmdbuf_obj->cmdbuf_id); + } + vcmd_write_reg((const void *)dev->hwregs,0x44,vcmd_read_reg((const void *)dev->hwregs,0x44)); + vcmd_write_reg((const void *)dev->hwregs,0x40,dev->reg_mirror[0x40/4]); + vcmd_write_reg((const void *)dev->hwregs,0x48,dev->reg_mirror[0x48/4]); + vcmd_write_reg((const void *)dev->hwregs,0x4c,dev->reg_mirror[0x4c/4]); + vcmd_write_reg((const void *)dev->hwregs,0x50,dev->reg_mirror[0x50/4]); + vcmd_write_reg((const void *)dev->hwregs,0x54,dev->reg_mirror[0x54/4]); + vcmd_write_reg((const void *)dev->hwregs,0x58,dev->reg_mirror[0x58/4]); + vcmd_write_reg((const void *)dev->hwregs,0x5c,dev->reg_mirror[0x5c/4]); + 
vcmd_write_reg((const void *)dev->hwregs,0x60,dev->reg_mirror[0x60/4]); + vcmd_write_reg((const void *)dev->hwregs,0x64,0xffffffff);//not interrupt cpu + + dev->working_state = WORKING_STATE_WORKING; + + ConfigAIIXFE_MMU_BYVCMD(&dev); + + //start + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_MASTER_OUT_CLK_GATE_DISABLE,1);//this bit should be set 1 only when need to reset dec400 + vcmd_set_register_mirror_value(dev->reg_mirror,HWIF_VCMD_START_TRIGGER,1); + PDEBUG("To write vcmd register 16:0x%x\n",dev->reg_mirror[0x40/4]); + vcmd_write_reg((const void *)dev->hwregs,0x40,dev->reg_mirror[0x40/4]); +#ifdef VCMD_DEBUG_INTERNAL + printk_vcmd_register_debug(dev->hwregs, "vcmd_start exits "); +#endif + } + } + +} + +static void create_read_all_registers_cmdbuf(struct exchange_parameter* input_para) +{ + u32 register_range[]={VCMD_ENCODER_REGISTER_SIZE, + VCMD_IM_REGISTER_SIZE, + VCMD_DECODER_REGISTER_SIZE, + VCMD_JPEG_ENCODER_REGISTER_SIZE, + VCMD_JPEG_DECODER_REGISTER_SIZE}; + u32 counter_cmdbuf_size=0; + u32 * set_base_addr=vcmd_buf_mem_pool.virtualAddress + input_para->cmdbuf_id*CMDBUF_MAX_SIZE/4; + //u32 *status_base_virt_addr=vcmd_status_buf_mem_pool.virtualAddress + input_para->cmdbuf_id*CMDBUF_MAX_SIZE/4+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_main_addr/2/4+0); + ptr_t status_base_phy_addr=vcmd_status_buf_mem_pool.busAddress + input_para->cmdbuf_id*CMDBUF_MAX_SIZE+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_main_addr/2+0); + u32 map_status_base_phy_addr=vcmd_status_buf_mem_pool.mmu_bus_address + input_para->cmdbuf_id*CMDBUF_MAX_SIZE+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_main_addr/2+0); + u32 offset_inc=0; + u32 offset_inc_dec400=0; + if(vcmd_manager[input_para->module_type][0]->hw_version_id>HW_ID_1_0_C) + { + pr_info("vc8000_vcmd_driver:create cmdbuf data when hw_version_id = 0x%x\n",vcmd_manager[input_para->module_type][0]->hw_version_id); + + //read vcmd executing cmdbuf 
id registers to ddr for balancing core load. + *(set_base_addr+0) = (OPCODE_RREG) |(1<<16) |(EXECUTING_CMDBUF_ID_ADDR*4); + counter_cmdbuf_size += 4; + *(set_base_addr+1) = (u32)0; //will be changed in link stage + counter_cmdbuf_size += 4; + *(set_base_addr+2) = (u32)0; //will be changed in link stage + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+3) = 0; + counter_cmdbuf_size += 4; + + //read main IP all registers + *(set_base_addr+4) = (OPCODE_RREG) |((register_range[input_para->module_type]/4)<<16) |(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_main_addr+0); + counter_cmdbuf_size += 4; + if (mmu_enable) { + *(set_base_addr+5) = map_status_base_phy_addr; + } else { + *(set_base_addr+5) = (u32)(status_base_phy_addr-base_ddr_addr); + } + counter_cmdbuf_size += 4; + if (mmu_enable) { + *(set_base_addr+6) = 0; + } else { + if(sizeof(size_t) == 8) { + *(set_base_addr+6) = (u32)((u64)(status_base_phy_addr-base_ddr_addr)>>32); + } else { + *(set_base_addr+6) = 0; + } + } + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+7) = 0; + counter_cmdbuf_size += 4; + + if(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_L2Cache_addr != 0xffff) + { + //read L2 cache register + offset_inc = 4; + status_base_phy_addr=vcmd_status_buf_mem_pool.busAddress + input_para->cmdbuf_id*CMDBUF_MAX_SIZE+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_L2Cache_addr/2+0); + map_status_base_phy_addr=vcmd_status_buf_mem_pool.mmu_bus_address + input_para->cmdbuf_id*CMDBUF_MAX_SIZE+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_L2Cache_addr/2+0); + //read L2cache IP first register + *(set_base_addr+8) = (OPCODE_RREG) |(1<<16) |(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_L2Cache_addr+0); + counter_cmdbuf_size += 4; + if (mmu_enable) { + *(set_base_addr+9) = map_status_base_phy_addr; + } else { + *(set_base_addr+9) = (u32)(status_base_phy_addr-base_ddr_addr); + } + 
counter_cmdbuf_size += 4; + if (mmu_enable) { + *(set_base_addr+10) = 0; + } else { + if(sizeof(size_t) == 8) { + *(set_base_addr+10) = (u32)((u64)(status_base_phy_addr-base_ddr_addr)>>32); + } else { + *(set_base_addr+10) = 0; + } + } + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+11) = 0; + counter_cmdbuf_size += 4; + } + if(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_dec400_addr != 0xffff) + { + //read dec400 register + offset_inc_dec400 = 4; + status_base_phy_addr=vcmd_status_buf_mem_pool.busAddress + input_para->cmdbuf_id*CMDBUF_MAX_SIZE+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_dec400_addr/2+0); + map_status_base_phy_addr=vcmd_status_buf_mem_pool.mmu_bus_address + input_para->cmdbuf_id*CMDBUF_MAX_SIZE+(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_dec400_addr/2+0); + //read DEC400 IP first register + *(set_base_addr+8+offset_inc) = (OPCODE_RREG) |(0x2b<<16) |(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_dec400_addr+0); + counter_cmdbuf_size += 4; + if (mmu_enable) + { + *(set_base_addr+9+offset_inc) = map_status_base_phy_addr; + } + else + { + *(set_base_addr+9+offset_inc) = (u32)(status_base_phy_addr-base_ddr_addr); + } + counter_cmdbuf_size += 4; + if (mmu_enable) + { + *(set_base_addr+10+offset_inc) = 0; + } + else + { + if(sizeof(size_t) == 8) + { + *(set_base_addr+10+offset_inc) = (u32)((u64)(status_base_phy_addr-base_ddr_addr)>>32); + } + else + { + *(set_base_addr+10+offset_inc) = 0; + } + } + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+11+offset_inc) = 0; + counter_cmdbuf_size += 4; + } + +#if 0 + //INT code, interrupt immediately + *(set_base_addr+4) = (OPCODE_INT) |0 |input_para->cmdbuf_id; + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+5) = 0; + counter_cmdbuf_size += 4; +#endif + //read vcmd registers to ddr + *(set_base_addr+8+offset_inc + offset_inc_dec400) = (OPCODE_RREG) |(27<<16) |(0); + counter_cmdbuf_size += 4; + 
*(set_base_addr+9+offset_inc + offset_inc_dec400) = (u32)0; //will be changed in link stage + counter_cmdbuf_size += 4; + *(set_base_addr+10+offset_inc + offset_inc_dec400) = (u32)0; //will be changed in link stage + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+11+offset_inc + offset_inc_dec400) = 0; + counter_cmdbuf_size += 4; + //JMP RDY = 0 + *(set_base_addr +12+offset_inc + offset_inc_dec400)= (OPCODE_JMP_RDY0) |0 |JMP_IE_1|0; + counter_cmdbuf_size += 4; + *(set_base_addr +13+offset_inc + offset_inc_dec400) = 0; + counter_cmdbuf_size += 4; + *(set_base_addr +14+offset_inc + offset_inc_dec400) = 0; + counter_cmdbuf_size += 4; + *(set_base_addr +15+offset_inc + offset_inc_dec400) = input_para->cmdbuf_id; + //don't add the last alignment DWORD in order to identify END command or JMP command. + //counter_cmdbuf_size += 4; + input_para->cmdbuf_size=(16+offset_inc + offset_inc_dec400)*4; + } + else + { + pr_info("vc8000_vcmd_driver:create cmdbuf data when hw_version_id = 0x%x\n",vcmd_manager[input_para->module_type][0]->hw_version_id); + //read all registers + *(set_base_addr+0) = (OPCODE_RREG) |((register_range[input_para->module_type]/4)<<16) |(vcmd_manager[input_para->module_type][0]->vcmd_core_cfg.submodule_main_addr+0); + counter_cmdbuf_size += 4; + if (mmu_enable) { + *(set_base_addr+1) = map_status_base_phy_addr; + } else { + *(set_base_addr+1) = (u32)(status_base_phy_addr-base_ddr_addr); + } + counter_cmdbuf_size += 4; + if (mmu_enable) { + *(set_base_addr+2) = 0; + } else { + if(sizeof(size_t) == 8) { + *(set_base_addr+2) = (u32)((u64)(status_base_phy_addr-base_ddr_addr)>>32); + } else { + *(set_base_addr+2) = 0; + } + } + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+3) = 0; + counter_cmdbuf_size += 4; +#if 0 + //INT code, interrupt immediately + *(set_base_addr+4) = (OPCODE_INT) |0 |input_para->cmdbuf_id; + counter_cmdbuf_size += 4; + //alignment + *(set_base_addr+5) = 0; + counter_cmdbuf_size += 4; +#endif + //JMP RDY = 0 + 
*(set_base_addr +4)= (OPCODE_JMP_RDY0) |0 |JMP_IE_1|0; + counter_cmdbuf_size += 4; + *(set_base_addr +5) = 0; + counter_cmdbuf_size += 4; + *(set_base_addr +6) = 0; + counter_cmdbuf_size += 4; + *(set_base_addr +7) = input_para->cmdbuf_id; + //don't add the last alignment DWORD in order to identify END command or JMP command. + //counter_cmdbuf_size += 4; + input_para->cmdbuf_size=8*4; + } + +} +static void read_main_module_all_registers(u32 main_module_type) +{ + int ret; + struct exchange_parameter input_para; + u32 irq_status_ret=0; + u32 *status_base_virt_addr; + + input_para.executing_time=0; + input_para.priority=CMDBUF_PRIORITY_NORMAL; + input_para.module_type = main_module_type; + input_para.cmdbuf_size=0; + ret = reserve_cmdbuf(NULL,&input_para); + vcmd_manager[main_module_type][0]->status_cmdbuf_id = input_para.cmdbuf_id; + create_read_all_registers_cmdbuf(&input_para); + link_and_run_cmdbuf(NULL,&input_para); + //msleep(1000); + hantrovcmd_isr(input_para.core_id, &hantrovcmd_data[input_para.core_id]); + wait_cmdbuf_ready(NULL,input_para.cmdbuf_id,&irq_status_ret); + + pr_info("%s:record_idle:busy_count = %d\n",__func__,encoder_get_devfreq_priv_data()->busy_count); + status_base_virt_addr=vcmd_status_buf_mem_pool.virtualAddress + input_para.cmdbuf_id*CMDBUF_MAX_SIZE/4+(vcmd_manager[input_para.module_type][0]->vcmd_core_cfg.submodule_main_addr/2/4+0); + pr_info("vc8000_vcmd_driver: main module register 0:0x%x\n",*status_base_virt_addr); + pr_info("vc8000_vcmd_driver: main module register 80:0x%x\n",*(status_base_virt_addr+80)); + pr_info("vc8000_vcmd_driver: main module register 214:0x%x\n",*(status_base_virt_addr+214)); + pr_info("vc8000_vcmd_driver: main module register 226:0x%x\n", *(status_base_virt_addr+226)); + pr_info("vc8000_vcmd_driver: main module register 287:0x%x\n", *(status_base_virt_addr+287)); + //don't release cmdbuf because it can be used repeatedly + //release_cmdbuf(input_para.cmdbuf_id); +} + 
+/*------------------------------------------------------------------------------ + Function name : vcmd_pcie_init + Description : Initialize PCI Hw access + + Return type : int + ------------------------------------------------------------------------------*/ +static int vcmd_init(struct platform_device *pdev) +{ + struct pci_dev *g_vcmd_dev = NULL; /* PCI device structure. */ + unsigned long g_vcmd_base_ddr_hw; /* PCI base register address (memalloc) */ + + vcmd_buf_mem_pool.virtualAddress = dma_alloc_coherent(&pdev->dev, CMDBUF_POOL_TOTAL_SIZE*2 + CMDBUF_VCMD_REGISTER_TOTAL_SIZE, + &vcmd_buf_mem_pool.busAddress, GFP_KERNEL | GFP_DMA); + + pr_info("Base memory val 0x%llx\n", vcmd_buf_mem_pool.busAddress); + + vcmd_buf_mem_pool.size =CMDBUF_POOL_TOTAL_SIZE; + pr_info("Init: vcmd_buf_mem_pool.busAddress=0x%llx.\n",(long long unsigned int)vcmd_buf_mem_pool.busAddress); + + if (vcmd_buf_mem_pool.virtualAddress == NULL ) { + pr_info("Init: failed to ioremap.\n"); + return -1; + } + pr_info("Init: vcmd_buf_mem_pool.virtualAddress=0x%llx.\n",(long long unsigned int)vcmd_buf_mem_pool.virtualAddress); + + vcmd_status_buf_mem_pool.busAddress = (void *)vcmd_buf_mem_pool.busAddress+CMDBUF_POOL_TOTAL_SIZE; + vcmd_status_buf_mem_pool.virtualAddress = (void *)vcmd_buf_mem_pool.virtualAddress+CMDBUF_POOL_TOTAL_SIZE; + vcmd_status_buf_mem_pool.size =CMDBUF_POOL_TOTAL_SIZE; + pr_info("Init: vcmd_status_buf_mem_pool.busAddress=0x%llx.\n",(long long unsigned int)vcmd_status_buf_mem_pool.busAddress); + + if (vcmd_status_buf_mem_pool.virtualAddress == NULL ) { + pr_info("Init: failed to ioremap.\n"); + return -1; + } + pr_info("Init: vcmd_status_buf_mem_pool.virtualAddress=0x%llx.\n",(long long unsigned int)vcmd_status_buf_mem_pool.virtualAddress); + + vcmd_registers_mem_pool.busAddress = (void *)vcmd_buf_mem_pool.busAddress+CMDBUF_POOL_TOTAL_SIZE*2; + vcmd_registers_mem_pool.virtualAddress = (void *)vcmd_buf_mem_pool.virtualAddress+CMDBUF_POOL_TOTAL_SIZE*2; + 
vcmd_registers_mem_pool.size =CMDBUF_VCMD_REGISTER_TOTAL_SIZE; + pr_info("Init: vcmd_registers_mem_pool.busAddress=0x%llx.\n",(long long unsigned int)vcmd_registers_mem_pool.busAddress); + + if (vcmd_registers_mem_pool.virtualAddress == NULL ) { + pr_info("Init: failed to ioremap.\n"); + return -1; + } + pr_info("Init: vcmd_registers_mem_pool.virtualAddress=0x%llx.\n",(long long unsigned int)vcmd_registers_mem_pool.virtualAddress); + + return 0; + +out_pci_disable_device: + pci_disable_device(g_vcmd_dev); +out: + return -1; +} + +static ssize_t encoder_config_write(struct file *filp, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct hantrovcmd_dev *dev = hantrovcmd_data; + unsigned long value; + int ret; + + if (count > VC8000E_MAX_CONFIG_LEN) + count = VC8000E_MAX_CONFIG_LEN; + else if (count <= 2) + return 0; + + ret = copy_from_user(dev->config_buf, userbuf, count); + if (ret) { + ret = -EFAULT; + goto out; + } + + //pr_info("hantrodec config: %s\n", dev->config_buf); + switch (dev->config_buf[0]) { + case 'd': + value = simple_strtoul(&(dev->config_buf[1]), NULL, 10); + pm_runtime_set_autosuspend_delay(&dev->pdev->dev, value); + pr_info("Set pm runtime auto suspend delay to %ldms\n", value); + break; + case 'p': + if (strncmp(&(dev->config_buf[0]),"pfreq-en",8) == 0) + { + debug_pr_devfreq_info = 1; + } + else if (strncmp(&(dev->config_buf[0]),"pfreq-dis",9) == 0) + { + debug_pr_devfreq_info = 0; + } + pr_info("cmd %s set debug_pr_devfreq_info %ld \n",&(dev->config_buf[0]),debug_pr_devfreq_info); + break; + default: + pr_warn("Unsupported config!\n"); + } + +out: + return ret < 0 ? 
ret : count; +} + +static ssize_t encoder_config_read(struct file *filp, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct hantrovcmd_dev *dev = hantrovcmd_data; + memset(dev->config_buf, 0, VC8000E_MAX_CONFIG_LEN); + return 0; +} + +static const struct file_operations encoder_debug_ops = { + .write = encoder_config_write, + .read = encoder_config_read, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +static int encoder_add_debugfs(struct platform_device *pdev) +{ + root_debugfs_dir = debugfs_create_dir("vc8000e",NULL); + if (!root_debugfs_dir) { + dev_err(&pdev->dev, "Failed to create vc8000e debugfs\n"); + return -EINVAL; + } + + dev_info(&pdev->dev, "Create vc8000e debugfs.\n"); + + debugfs_create_file("config", 0600, root_debugfs_dir, + hantrovcmd_data, &encoder_debug_ops); + return 0; +} + + +/*----------------------------------------- + platform register +-----------------------------------------*/ + +static const struct of_device_id hantro_of_match[] = { + { .compatible = "thead,light-vc8000e", }, + { .compatible = "xuantie,th1520-vc8000e", }, + { /* sentinel */ }, +}; + +static int check_power_domain(void) +{ + struct device_node *dn = NULL; + struct property *info = NULL; + dn = of_find_node_by_name(NULL, "venc"); + if (dn != NULL) + info = of_find_property(dn, "power-domains", NULL); + pr_debug("%s, %d: power gating is %s\n", __func__, __LINE__, + (info == NULL) ? "disabled" : "enabled"); + return (info == NULL) ? 
0 : 1;
+}
+
+static int encoder_runtime_suspend(struct device *dev)
+{
+ struct hantrovcmd_dev *encdev = hantrovcmd_data;
+
+ pr_debug("%s, %d: Disable clock\n", __func__, __LINE__);
+
+ clk_disable_unprepare(encdev->cclk);
+ clk_disable_unprepare(encdev->aclk);
+ clk_disable_unprepare(encdev->pclk);
+ encoder_devfreq_set_rate(dev); //for the last set rate request,may not handled in cmd
+ encoder_devfreq_suspend(encoder_get_devfreq_priv_data());
+ return 0;
+}
+
+static int encoder_runtime_resume(struct device *dev)
+{
+ struct hantrovcmd_dev *encdev = hantrovcmd_data;
+ int ret;
+
+ ret = clk_prepare_enable(encdev->cclk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(encdev->aclk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable axi clock\n");
+ clk_disable_unprepare(encdev->cclk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(encdev->pclk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable apb clock\n");
+ clk_disable_unprepare(encdev->cclk);
+ clk_disable_unprepare(encdev->aclk);
+ return ret;
+ }
+
+ if (encdev->has_power_domains) {
+#ifdef HANTROMMU_SUPPORT
+ MMURestore(mmu_hwregs);
+#endif
+ vcmd_reset();
+ }
+ pr_debug("%s, %d: Enabled clock\n", __func__, __LINE__);
+ encoder_devfreq_resume(encoder_get_devfreq_priv_data());
+
+ return 0;
+}
+
+void encoder_vcmd_suspend_record(void)
+{
+ if (!hantrovcmd_data) {
+ return;
+ }
+ int i;
+ struct hantrovcmd_dev* dev = NULL;
+ unsigned long flags;
+ struct cmdbuf_obj* cmdbuf_obj;
+ int timeout = 100;
+ for(i=0;i<total_vcmd_core_num;i++)
+ {
+ dev = &hantrovcmd_data[i];
+ if(dev->working_state == WORKING_STATE_WORKING )
+ dev->suspend_running_cmdbuf_node = dev->last_linked_cmdbuf_node;
+ else
+ dev->suspend_running_cmdbuf_node = NULL;
+
+ dev->suspend_entered = true;
+ if(dev->last_linked_cmdbuf_node)
+ {
+ cmdbuf_obj = (struct cmdbuf_obj*)(dev->last_linked_cmdbuf_node->data);
+ while(timeout--){
+ if(cmdbuf_obj->cmdbuf_run_done)
+ break;
+ udelay(1000);
+ }
+ }
+ pr_info("%s: core 
%d working state %s ,node %px \n",__func__,i,
+ dev->working_state == WORKING_STATE_WORKING ? "working":"idle",
+ dev->suspend_running_cmdbuf_node);
+ }
+}
+
+int encoder_vcmd_resume_start(void)
+{
+ if (!hantrovcmd_data) {
+ return 0;
+ }
+ int i;
+ struct hantrovcmd_dev* dev = NULL;
+ bi_list_node* last_cmdbuf_node;
+ int ret;
+ for(i=0;i<total_vcmd_core_num;i++)
+ {
+ dev = &hantrovcmd_data[i];
+ dev->working_state = WORKING_STATE_IDLE;
+ last_cmdbuf_node = dev->suspend_running_cmdbuf_node;
+ if(last_cmdbuf_node)
+ {
+ //run
+ while (last_cmdbuf_node &&
+ ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_run_done)
+ last_cmdbuf_node = last_cmdbuf_node->next;
+
+ if (last_cmdbuf_node && last_cmdbuf_node->data) {
+ pr_info("vcmd start for cmdbuf id %d, cmdbuf_run_done = %d\n",
+ ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_id,
+ ((struct cmdbuf_obj*)last_cmdbuf_node->data)->cmdbuf_run_done);
+ ret = pm_runtime_resume_and_get(&hantrovcmd_data[0].pdev->dev);
+ if(ret < 0)
+ return ret;
+ vcmd_start(dev,last_cmdbuf_node);
+ }
+
+ }
+ dev->suspend_entered = false;
+ dev->suspend_running_cmdbuf_node = NULL;
+ }
+ return 0;
+}
+
+static int encoder_suspend(struct device *dev)
+{
+ pr_info("%s, %d: enter\n", __func__, __LINE__);
+ encoder_vcmd_suspend_record();
+ /*pm_runtime_force_suspend will check current clk state*/
+ return pm_runtime_force_suspend(dev);
+
+}
+
+static int encoder_resume(struct device *dev)
+{
+ int ret;
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+ ret = encoder_vcmd_resume_start();
+ pr_info("%s, %d: exit resume\n", __func__, __LINE__);
+
+ return ret;
+}
+
+/******************************************************************************\
+******************************* VPU Devfreq support START***********************
+\******************************************************************************/
+
+static void encoder_devfreq_update_utilization(struct encoder_devfreq *devfreq)
+{
+ ktime_t now, last;
+ ktime_t busy;
+ now = ktime_get();
+ last = devfreq->time_last_update;
+
+ 
if (devfreq->busy_count > 0) { + busy = ktime_sub(now, last); + devfreq->busy_time += busy; + #ifndef CONFIG_PM_DEVFREQ + devfreq->based_maxfreq_last_busy_t = busy; + #else + if(devfreq->max_freq) + devfreq->based_maxfreq_last_busy_t = busy/(devfreq->max_freq/devfreq->cur_devfreq); + else + devfreq->based_maxfreq_last_busy_t = busy; + #endif + devfreq->based_maxfreq_busy_time += devfreq->based_maxfreq_last_busy_t; + devfreq->busy_record_count++; + } + else + { + if(devfreq->busy_time > 0) //if first time in not recorded busy time,ignore idle time. + devfreq->idle_time += ktime_sub(now, last); + } + devfreq->time_last_update = now; +} + +static void encoder_devfreq_reset(struct encoder_devfreq *devfreq) +{ + devfreq->busy_time = 0; + devfreq->idle_time = 0; + devfreq->time_last_update = ktime_get(); +} + +void encoder_devfreq_reset_profile_record(struct encoder_devfreq *devfreq) +{ + devfreq->based_maxfreq_busy_time = 0; + devfreq->busy_record_count = 0; +} + +void encoder_devfreq_record_busy(struct encoder_devfreq *devfreq) +{ + unsigned long irqflags; + int busy_count; + if (!devfreq) + return; + //when devfreq not enabled,need into record time also. 
+ + encoder_dev_clk_lock(); + spin_lock_irqsave(&devfreq->lock, irqflags); + busy_count = devfreq->busy_count; + PDEBUG("record_busy:busy_count = %d\n",busy_count); + if(devfreq->busy_count > 0) + { + devfreq->busy_count++; + spin_unlock_irqrestore(&devfreq->lock, irqflags); + encoder_dev_clk_unlock(); + return; + } + + encoder_devfreq_update_utilization(devfreq); + + devfreq->busy_count++; + + spin_unlock_irqrestore(&devfreq->lock, irqflags); + + if(!busy_count) + encoder_devfreq_set_rate(&hantrovcmd_data->pdev->dev); + + encoder_dev_clk_unlock(); +} + +void encoder_devfreq_record_idle(struct encoder_devfreq *devfreq) +{ + unsigned long irqflags; + + if (!devfreq) + return; + + spin_lock_irqsave(&devfreq->lock, irqflags); + PDEBUG("record_idle:busy_count = %d\n",devfreq->busy_count); + if(devfreq->busy_count > 1) + { + devfreq->busy_count--; + spin_unlock_irqrestore(&devfreq->lock, irqflags); + return; + } + + encoder_devfreq_update_utilization(devfreq); + + WARN_ON(--devfreq->busy_count < 0); + + spin_unlock_irqrestore(&devfreq->lock, irqflags); +#ifdef DEV_FREQ_DEBUG + unsigned long busy_time,idle_time; + busy_time = ktime_to_us(devfreq->busy_time); + idle_time = ktime_to_us(devfreq->idle_time); + pr_info("busy %lu idle %lu %lu %% \n", + busy_time,idle_time, + busy_time / ( (idle_time+busy_time) / 100) ); +#endif +} +struct encoder_devfreq * encoder_get_devfreq_priv_data(void) +{ + return &hantrovcmd_data->devfreq; +} +/* only reset record time now */ +int encoder_devfreq_resume(struct encoder_devfreq *devfreq) +{ + unsigned long irqflags; + + if (!devfreq->df) + return 0; + + spin_lock_irqsave(&devfreq->lock, irqflags); + devfreq->busy_count = 0;//need reset avoid up + encoder_devfreq_reset(devfreq); + + spin_unlock_irqrestore(&devfreq->lock, irqflags); + + return devfreq_resume_device(devfreq->df); +} + +int encoder_devfreq_suspend(struct encoder_devfreq *devfreq) +{ + if (!devfreq->df) + return 0; + wake_up_all(&devfreq->target_freq_wait_queue); + return 
devfreq_suspend_device(devfreq->df); +} + +void encoder_dev_clk_lock(void) +{ + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + if(!devfreq->df) + return; + mutex_lock(&devfreq->clk_mutex); +} + +void encoder_dev_clk_unlock(void) +{ + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + if(!devfreq->df) + return; + mutex_unlock(&devfreq->clk_mutex); +} + + +/* set rate need clk disabled,so carefully calling this function + * which will disabled clk +*/ +int encoder_devfreq_set_rate(struct device * dev) +{ + int ret; + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + struct hantrovcmd_dev *encdev = hantrovcmd_data; + if (!devfreq->df) + return 0; + if(!devfreq->update_freq_flag) + return 0; + if(debug_pr_devfreq_info) + pr_info("start set rate %ldMHz \n",devfreq->next_target_freq/1000/1000); + if( !hantrovcmd_devfreq_check_state() ) { + pr_info("devfreq check state not ok\n"); + return 0; + } + clk_disable_unprepare(encdev->cclk); + ret = dev_pm_opp_set_rate(dev, devfreq->next_target_freq); + if(ret) { + pr_err("set rate %ld MHz failed \n",devfreq->next_target_freq/1000/1000); + } else { + devfreq->cur_devfreq = devfreq->next_target_freq; + } + devfreq->update_freq_flag = false; + wake_up_all(&devfreq->target_freq_wait_queue); + ret = clk_prepare_enable(encdev->cclk); + + if(debug_pr_devfreq_info) + pr_info("finished set rate \n"); + if (ret < 0) { + dev_err(dev, "could not prepare or enable core clock\n"); + return ret; + } + return 0; +} + +static int encoder_devfreq_target(struct device * dev, unsigned long *freq, u32 flags) +{ + int ret; + struct dev_pm_opp *opp; + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) { + dev_info(dev, "Failed to find opp for %lu Hz\n", *freq); + return PTR_ERR(opp); + } + dev_pm_opp_put(opp); + + devfreq->next_target_freq = *freq; + if(*freq != devfreq->cur_devfreq) + { + 
devfreq->update_freq_flag = true; + if( !wait_event_timeout( devfreq->target_freq_wait_queue, (!devfreq->update_freq_flag), + msecs_to_jiffies(100) ) ) + { + if(debug_pr_devfreq_info) //usually last req,but all vcmd done + dev_info(dev,"devfreq target freq set : wait queue timeout\n"); + } + } + + return 0; +} + +static int encoder_devfreq_get_status(struct device *dev, struct devfreq_dev_status *stat) +{ + unsigned long irqflags; + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + stat->current_frequency = devfreq->cur_devfreq; + spin_lock_irqsave(&devfreq->lock, irqflags); + + encoder_devfreq_update_utilization(devfreq); + + stat->total_time = ktime_to_ns(ktime_add(devfreq->busy_time, + devfreq->idle_time)); + stat->busy_time = ktime_to_ns(devfreq->busy_time); + + encoder_devfreq_reset(devfreq); + + spin_unlock_irqrestore(&devfreq->lock, irqflags); + + if(debug_pr_devfreq_info){ + dev_info(dev, "busy %lu total %lu %lu %% freq %lu MHz\n", + stat->busy_time/1000, stat->total_time/1000, + stat->busy_time / (stat->total_time / 100), + stat->current_frequency / 1000 / 1000); + } + return 0; +} + +static int encoder_devfreq_get_cur_freq( struct device *dev, unsigned long *freq) +{ + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + *freq = devfreq->cur_devfreq; + return 0; +} +#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND +struct devfreq_simple_ondemand_data encoder_gov_data; +#endif + +static struct devfreq_dev_profile encoder_devfreq_gov_data = +{ + .polling_ms = 100, + .target = encoder_devfreq_target, + .get_dev_status = encoder_devfreq_get_status, + .get_cur_freq = encoder_devfreq_get_cur_freq, +}; + +#ifdef CONFIG_TH1520_SYSTEM_MONITOR +static struct monitor_dev_profile encoder_dev_monitor = { + .type = MONITOR_TPYE_DEV, +}; +#endif + +void encoder_devfreq_fini(struct device *dev) +{ + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); +#ifdef CONFIG_TH1520_SYSTEM_MONITOR + if(devfreq->mdev_info) + 
th1520_system_monitor_unregister(devfreq->mdev_info); +#endif + + if (devfreq->df) { + devm_devfreq_remove_device(dev, devfreq->df); + devfreq->df = NULL; + } + + if (devfreq->opp_of_table_added) { + dev_pm_opp_of_remove_table(dev); + devfreq->opp_of_table_added = false; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + if (devfreq->clkname_opp_table) { + dev_pm_opp_put_clkname(devfreq->clkname_opp_table); + devfreq->clkname_opp_table = NULL; + } +#else + if(devfreq->token >= 0) + dev_pm_opp_put_clkname(devfreq->token); +#endif + mutex_destroy(&devfreq->clk_mutex); +} + +int encoder_devfreq_init(struct device *dev) +{ + struct devfreq *df; + struct clk *new_clk; + struct opp_table *opp_table; + int ret = 0; + struct encoder_devfreq *devfreq = encoder_get_devfreq_priv_data(); + + memset(devfreq,0,sizeof(struct encoder_devfreq)); + spin_lock_init(&devfreq->lock); + init_waitqueue_head(&devfreq->target_freq_wait_queue); + mutex_init(&devfreq->clk_mutex); + +#ifdef CONFIG_PM_DEVFREQ +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + opp_table = dev_pm_opp_set_clkname(dev,"cclk"); + if(IS_ERR(opp_table)) { + pr_err("enc set cclk failed\n"); + ret = PTR_ERR(opp_table); + goto err_fini; + } + devfreq->clkname_opp_table = opp_table; +#else + devfreq->token = dev_pm_opp_set_clkname(dev,"cclk"); + if (devfreq->token < 0) { + pr_err("enc set cclk failed\n"); + ret = devfreq->token; + goto err_fini; + } +#endif + + ret = dev_pm_opp_of_add_table(dev); + if(ret) { + pr_info("enc opp table not found in dtb\n"); + goto err_fini; + } + devfreq->opp_of_table_added = true; + + new_clk = devm_clk_get(dev, "cclk"); + + devfreq->cur_devfreq = clk_get_rate(new_clk); + + encoder_devfreq_gov_data.initial_freq = devfreq->cur_devfreq; + +#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND + encoder_gov_data.upthreshold = 80; + encoder_gov_data.downdifferential = 10; + + df = devm_devfreq_add_device(dev, + &encoder_devfreq_gov_data, + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &encoder_gov_data); + + 
if(IS_ERR(df)) { + pr_err("Error: init devfreq %lx %ld\n", (unsigned long)dev,(long)df); + devfreq->df = NULL; + ret = PTR_ERR(df); + goto err_fini; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0) + unsigned long *freq_table = df->profile->freq_table; + if (freq_table[0] < freq_table[df->profile->max_state - 1]) { + devfreq->max_freq = freq_table[df->profile->max_state - 1]; + } else { + devfreq->max_freq = freq_table[0]; + } +#else + unsigned long *freq_table = df->freq_table; + if (freq_table[0] < freq_table[df->max_state - 1]) { + devfreq->max_freq = freq_table[df->max_state - 1]; + } else { + devfreq->max_freq = freq_table[0]; + } +#endif + pr_info("device max freq %ld\n",devfreq->max_freq); + df->suspend_freq = 0; // not set freq when suspend,not suitable for async set rate + devfreq->df = df; + +#ifdef CONFIG_TH1520_SYSTEM_MONITOR + encoder_dev_monitor.data = devfreq->df; + devfreq->mdev_info = th1520_system_monitor_register(dev, &encoder_dev_monitor); + if (IS_ERR(devfreq->mdev_info)) + devfreq->mdev_info = NULL; +#endif + +#endif +#endif + return 0; + +err_fini: + encoder_devfreq_fini(dev); + return ret; +} + +/******************************************************************************\ +******************************* VPU Devfreq support END ************************ +\******************************************************************************/ + +void venc_vcmd_profile_update(struct work_struct *work); +static DECLARE_DELAYED_WORK(venc_cmd_profile_work,venc_vcmd_profile_update); +static ktime_t last_update; +static long update_period_ms = 0; + +struct vcmd_profile venc_vcmd_profile; + +void venc_vcmd_profile_update(struct work_struct *work) +{ + //update busy time + ktime_t now,during; + struct encoder_devfreq *devfreq; + devfreq = encoder_get_devfreq_priv_data(); + now = ktime_get(); + during = ktime_sub(now,last_update); + last_update = now; + venc_vcmd_profile.dev_loading_percent = ktime_to_us(devfreq->based_maxfreq_busy_time) * 
100/ktime_to_us(during); + if(venc_vcmd_profile.dev_loading_percent > venc_vcmd_profile.dev_loading_max_percent) + venc_vcmd_profile.dev_loading_max_percent = venc_vcmd_profile.dev_loading_percent; + pr_debug("based_maxfreq_busy_time %lldms,during period %lld ms",ktime_to_us(devfreq->based_maxfreq_busy_time)/1000,ktime_to_ms(during)); + + if(devfreq->busy_record_count > 0) + venc_vcmd_profile.avg_hw_proc_us = ktime_to_us(devfreq->based_maxfreq_busy_time)/devfreq->busy_record_count; + else + venc_vcmd_profile.avg_hw_proc_us = 0; + + venc_vcmd_profile.last_hw_proc_us = ktime_to_us(devfreq->based_maxfreq_last_busy_t); + venc_vcmd_profile.proced_count = devfreq->busy_record_count; + encoder_devfreq_reset_profile_record(devfreq); + if(update_period_ms > 0) + schedule_delayed_work(&venc_cmd_profile_work, msecs_to_jiffies(update_period_ms)); +} + +static ssize_t log_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + ssize_t len = 0; + + const char *module_version = "1.0.0"; + int dev_id = 0; + + + len += scnprintf(buf + len, PAGE_SIZE - len, + "[VENC] Version: %s \n" + "----------------------------------------MODULE PARAM-----------------------------\n" + "updatePeriod_ms\n" + " %d\n" + "----------------------------------------MODULE STATUS------------------------------\n" + "DevId DevLoading_%% DevLoadingMax_%%\n" + " %d %d %d\n" + + " avg_hw_proc_us last_hw_proc_us proced_count\n" + " %d %d %d \n" + "cur_submit_vcmd cur_complete_vcmd vcmd_num_share_irq\n" + " %d %d %d \n" + "----------------------------------------EXCEPTION INFO-----------------------------------------\n" + "BusErr Abort Timeout CmdErr\n" + " %d %d %d %d \n", + module_version, update_period_ms, + dev_id, venc_vcmd_profile.dev_loading_percent, venc_vcmd_profile.dev_loading_max_percent, + + venc_vcmd_profile.avg_hw_proc_us, venc_vcmd_profile.last_hw_proc_us,venc_vcmd_profile.proced_count, + 
venc_vcmd_profile.cur_submit_vcmd_id,venc_vcmd_profile.cur_complete_vcmd_id,venc_vcmd_profile.vcmd_num_share_irq, + venc_vcmd_profile.vcmd_buserr_cnt, venc_vcmd_profile.vcmd_abort_cnt, venc_vcmd_profile.vcmd_timeout_cnt, venc_vcmd_profile.vcmd_cmderr_cnt); + + + return len; +} + +static ssize_t log_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + /******************clear *********************/ + venc_vcmd_profile.vcmd_buserr_cnt = 0; + venc_vcmd_profile.vcmd_abort_cnt = 0; + venc_vcmd_profile.vcmd_timeout_cnt = 0; + venc_vcmd_profile.vcmd_cmderr_cnt = 0; + + venc_vcmd_profile.dev_loading_max_percent = 0; + venc_vcmd_profile.last_hw_proc_us = 0; + return count; +} + +/******************updatePeriod ************************************/ +static ssize_t updatePeriod_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf,"%u\n",update_period_ms); +} + +static ssize_t updatePeriod_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + char *start = (char *)buf; + long old_period = update_period_ms; + update_period_ms = simple_strtoul(start, &start, 0); + if(old_period == 0 && update_period_ms) + schedule_delayed_work(&venc_cmd_profile_work,msecs_to_jiffies(update_period_ms)); + return count; +} +/******************define log *************************************/ +static struct kobj_attribute log_attr = __ATTR(log, 0664, log_show, log_store); +/******************define updatePeriod_ms*************************************/ +static struct kobj_attribute updatePeriod_attr = __ATTR(updatePeriod_ms, 0664, updatePeriod_show, updatePeriod_store); + +static struct attribute *attrs[] = { + &log_attr.attr, + &updatePeriod_attr.attr, + NULL, // must be NULL +}; + +static struct attribute_group venc_dev_attr_group = { + .name = "info", // dir name + .attrs = attrs, +}; + +int __init hantroenc_vcmd_probe(struct platform_device *pdev) +{ + int i,k; + int result; + + 
struct resource *mem; + mem = platform_get_resource(pdev,IORESOURCE_MEM,0); + if(mem->start) + vcmd_core_array[0].vcmd_base_addr = mem->start; + vcmd_core_array[0].vcmd_irq = platform_get_irq(pdev,0); + pr_info("%s:get irq %d\n",__func__,vcmd_core_array[0].vcmd_irq); + + result = vcmd_init(pdev); + if(result) + goto err; + + total_vcmd_core_num = 1; + for (i = 0; i< total_vcmd_core_num; i++) + { + pr_info("vcmd: module init - vcmdcore[%d] addr =0x%llx\n",i, + (long long unsigned int)vcmd_core_array[i].vcmd_base_addr); + } + hantrovcmd_data = (struct hantrovcmd_dev *)vmalloc(sizeof(struct hantrovcmd_dev)*total_vcmd_core_num); + if (hantrovcmd_data == NULL) + goto err1; + memset(hantrovcmd_data,0,sizeof(struct hantrovcmd_dev)*total_vcmd_core_num); + for(k=0;kpdev = pdev; + + encoder_add_debugfs(pdev); + + hantrovcmd_data->has_power_domains = check_power_domain(); + + hantrovcmd_data->aclk = devm_clk_get(&pdev->dev, "aclk"); + if (IS_ERR(hantrovcmd_data->aclk)) { + dev_err(&pdev->dev, "failed to get axi clock\n"); + goto err; + } + + hantrovcmd_data->cclk = devm_clk_get(&pdev->dev, "cclk"); + if (IS_ERR(hantrovcmd_data->cclk)) { + dev_err(&pdev->dev, "failed to get core clock\n"); + goto err; + } + + hantrovcmd_data->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(hantrovcmd_data->pclk)) { + dev_err(&pdev->dev, "failed to get apb clock\n"); + goto err; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, VC8000E_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + if (!pm_runtime_enabled(&pdev->dev)) { + if (encoder_runtime_resume(&pdev->dev)) + { + pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + } + } + pm_runtime_resume_and_get(&pdev->dev); + + init_bi_list(&global_process_manager); + result = ConfigAXIFE(1); //1: normal, 2: bypass + if(result < 0) + { + vcmd_release_AXIFE_IO(); + goto err1; + } + result = ConfigMMU(); + if(result < 0) + { + vcmd_release_MMU_IO(); + goto err1; + } + result = 
MMU_Kernel_map(); + if(result < 0) + goto err; + for(i=0;i busy, change your config. core_id=%d\n", + hantrovcmd_data[i].vcmd_core_cfg.vcmd_irq,i); + vcmd_release_IO(); + goto err; + } + } + else + { + pr_info("vc8000_vcmd_driver: IRQ not in use!\n"); + } + } + //cmdbuf pool allocation + //init_vcmd_non_cachable_memory_allocate(); + //for cmdbuf management + cmdbuf_used_pos=0; + for(k=0;k\n", hantrovcmd_major); + + create_kernel_process_manager(); + for(i=0;idev) ) { + pr_err("venc devfreq not enabled\n"); + } else { + pr_info("venc devfreq init ok\n"); + } + result = sysfs_create_group(&pdev->dev.kobj, &venc_dev_attr_group); + if(result) + pr_warn("venc create sysfs failed\n"); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + err: + if (root_debugfs_dir) { + debugfs_remove_recursive(root_debugfs_dir); + root_debugfs_dir = NULL; + } +#ifdef HANTROMMU_SUPPORT + MMU_Kernel_unmap(); + vcmd_pool_release(pdev); +#endif + unregister_chrdev_region(hantrovcmd_devt, 1); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + err1: + if (hantrovcmd_data != NULL) + vfree(hantrovcmd_data); + pr_info("vc8000_vcmd_driver: module not inserted\n"); + return result; + +} + +static int hantroenc_vcmd_remove(struct platform_device *pdev) +{ + int i=0; + u32 result; + + if (root_debugfs_dir) { + debugfs_remove_recursive(root_debugfs_dir); + root_debugfs_dir = NULL; + } + cancel_delayed_work_sync(&venc_cmd_profile_work); + pm_runtime_resume_and_get(&pdev->dev); + + for(i=0;idev); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) + encoder_runtime_suspend(&pdev->dev); + vfree(hantrovcmd_data); + + cdev_del(&hantrovcmd_cdev); + device_destroy(hantrovcmd_class, hantrovcmd_devt); + unregister_chrdev_region(hantrovcmd_devt, 1); + class_destroy(hantrovcmd_class); + 
sysfs_remove_group(&pdev->dev.kobj,&venc_dev_attr_group); +#ifndef DYNAMIC_MALLOC_VCMDNODE + if (g_cmdbuf_obj_pool) { + vfree(g_cmdbuf_obj_pool); + g_cmdbuf_obj_pool = NULL; + } + if (g_cmdbuf_node_pool) { + vfree(g_cmdbuf_node_pool); + g_cmdbuf_node_pool = NULL; + } +#endif + + pr_info("vc8000_vcmd_driver: module removed\n"); + return 0; +} + + +static const struct dev_pm_ops encoder_runtime_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(encoder_suspend, encoder_resume) + SET_RUNTIME_PM_OPS(encoder_runtime_suspend, encoder_runtime_resume, NULL) +}; + +static struct platform_driver hantroenc_vcmd_driver = { + .probe = hantroenc_vcmd_probe, + .remove = hantroenc_vcmd_remove, + .driver = { + .name = "encoder_hantroenc", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(hantro_of_match), + .pm = &encoder_runtime_pm_ops, + } +}; + + +int __init hantroenc_vcmd_init(void) +{ + int ret = 0; + pr_debug("enter %s\n",__func__); + ret = platform_driver_register(&hantroenc_vcmd_driver); + if(ret) + { + pr_err("register platform driver failed!\n"); + } + return ret; +} + +void __exit hantroenc_vcmd_cleanup(void) +{ + pr_debug("enter %s\n",__func__); + platform_driver_unregister(&hantroenc_vcmd_driver); + return; +} + +static int vcmd_reserve_IO(void) +{ + u32 hwid; + int i; + u32 found_hw = 0; + + pr_info("vcmd_reserve_IO: total_vcmd_core_num is %d\n", total_vcmd_core_num); + for (i=0;i> 16) & 0xFFFF) != VCMD_HW_ID ) + { + pr_info("hantrovcmd: HW not found at 0x%llx\n", + (long long unsigned int)hantrovcmd_data[i].vcmd_core_cfg.vcmd_base_addr); + iounmap(( void *) hantrovcmd_data[i].hwregs); + release_mem_region(hantrovcmd_data[i].vcmd_core_cfg.vcmd_base_addr, hantrovcmd_data[i].vcmd_core_cfg.vcmd_iosize); + hantrovcmd_data[i].hwregs = NULL; + continue; + } + + found_hw = 1; + + pr_info( + "hantrovcmd: HW at base <0x%llx> with ID <0x%08x>\n", + (long long unsigned int)hantrovcmd_data[i].vcmd_core_cfg.vcmd_base_addr, hwid); + + } + + if (found_hw == 0) + { + pr_err("hantrovcmd: NO 
ANY HW found!!\n"); + return -1; + } + + return 0; +} + +static void vcmd_release_IO(void) +{ + u32 i; + + vcmd_release_AXIFE_IO(); + vcmd_release_MMU_IO(); + for (i=0;ispinlock, flags); + if (dev->list_manager.head==NULL) + { + PDEBUG( "hantrovcmd_isr:received IRQ but core has nothing to do.\n"); + irq_status = vcmd_read_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_STATUS_OFFSET); + vcmd_write_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_STATUS_OFFSET,irq_status); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + + PDEBUG( "hantrovcmd_isr: received IRQ!\n"); + irq_status = vcmd_read_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_STATUS_OFFSET); +#ifdef VCMD_DEBUG_INTERNAL + { + u32 i, fordebug; + for(i=0;ihwregs, i*4); + pr_info("vcmd register %d:0x%x\n",i,fordebug); + } + } +#endif + + if(!irq_status) + { + //pr_info("hantrovcmd_isr error,irq_status :0x%x",irq_status); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + + PDEBUG( "irq_status of %d is:0x%x\n",dev->core_id,irq_status); + vcmd_write_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_STATUS_OFFSET,irq_status); + dev->reg_mirror[VCMD_REGISTER_INT_STATUS_OFFSET/4] = irq_status; + + if((dev->hw_version_id > HW_ID_1_0_C )&&(irq_status&0x3f)) + { + //if error,read from register directly. 
+ cmdbuf_id = vcmd_get_register_value((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_CMDBUF_EXECUTING_ID); + venc_vcmd_profile.cur_complete_vcmd_id = cmdbuf_id; + if(cmdbuf_id>=TOTAL_DISCRETE_CMDBUF_NUM) + { + pr_err("hantrovcmd_isr error cmdbuf_id greater than the ceiling !!\n"); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + else if((dev->hw_version_id > HW_ID_1_0_C )) + { + //read cmdbuf id from ddr +#ifdef VCMD_DEBUG_INTERNAL + { + u32 i, fordebug; + pr_info("ddr vcmd register phy_addr=0x%x\n",dev->vcmd_reg_mem_busAddress); + pr_info("ddr vcmd register virt_addr=0x%x\n",dev->vcmd_reg_mem_virtualAddress); + for(i=0;ivcmd_reg_mem_virtualAddress+i); + pr_info("ddr vcmd register %d:0x%x\n",i,fordebug); + } + } +#endif + + cmdbuf_id = *(dev->vcmd_reg_mem_virtualAddress+EXECUTING_CMDBUF_ID_ADDR); + venc_vcmd_profile.cur_complete_vcmd_id = cmdbuf_id; + pr_debug("hantrovcmd_isr: cmdbuf_id %d from virtual!!\n", cmdbuf_id); + if(cmdbuf_id>=TOTAL_DISCRETE_CMDBUF_NUM) + { + pr_err("hantrovcmd_isr error cmdbuf_id greater than the ceiling !!\n"); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + + + if(vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_RESET)) + { + //reset error,all cmdbuf that is not done will be run again. 
+ new_cmdbuf_node = dev->list_manager.head; + dev->working_state = WORKING_STATE_IDLE; + //find the first run_done=0 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done == 0)) + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + base_cmdbuf_node = new_cmdbuf_node; + vcmd_delink_cmdbuf(dev,base_cmdbuf_node); + vcmd_link_cmdbuf(dev,base_cmdbuf_node); + if(dev->sw_cmdbuf_rdy_num !=0) + { + //restart new command + vcmd_start(dev,base_cmdbuf_node); + } + handled++; + trace_venc_interrupt(0xffffffff,irq_status,0); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + if(vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_ABORT)) + { + //abort error,don't need to reset + new_cmdbuf_node = dev->list_manager.head; + dev->working_state = WORKING_STATE_IDLE; + venc_vcmd_profile.vcmd_abort_cnt++; + if(dev->hw_version_id > HW_ID_1_0_C ) + { + new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id]; + if(new_cmdbuf_node==NULL) + { + pr_err("hantrovcmd_isr error cmdbuf_id %d line %d!!\n",cmdbuf_id,__LINE__); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + else + { + exe_cmdbuf_busAddress = VCMDGetAddrRegisterValue((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR); + //find the cmdbuf that tigers ABORT + while(1) + { + if(new_cmdbuf_node==NULL) + { + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr) <=exe_cmdbuf_busAddress)&&(((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr+cmdbuf_obj->cmdbuf_size) >exe_cmdbuf_busAddress)) ) &&(cmdbuf_obj->cmdbuf_run_done==0)) + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + } + base_cmdbuf_node = new_cmdbuf_node; + // this cmdbuf and cmdbufs prior to itself, run_done = 1 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = 
(struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done==0)) + { + cmdbuf_obj->cmdbuf_run_done=1; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_OK; + cmdbuf_processed_num++; + encoder_devfreq_record_idle( encoder_get_devfreq_priv_data() ); + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->previous; + } + base_cmdbuf_node=base_cmdbuf_node->next; + vcmd_delink_cmdbuf(dev,base_cmdbuf_node); + if(software_triger_abort==0) + { + //for QCFE + vcmd_link_cmdbuf(dev,base_cmdbuf_node); + if(dev->sw_cmdbuf_rdy_num !=0) + { + //restart new command + vcmd_start(dev,base_cmdbuf_node); + } + } + trace_venc_interrupt(cmdbuf_id,irq_status,cmdbuf_processed_num); + spin_unlock_irqrestore(dev->spinlock, flags); + if(cmdbuf_processed_num) + wake_up_all(dev->wait_queue); + //to let high priority cmdbuf be inserted + wake_up_all(dev->wait_abort_queue); + handled++; + return IRQ_HANDLED; + } + if(vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_BUSERR)) + { + //bus error, don't need to reset where to record status? 
+ new_cmdbuf_node = dev->list_manager.head; + dev->working_state = WORKING_STATE_IDLE; + venc_vcmd_profile.vcmd_buserr_cnt++; + if(dev->hw_version_id > HW_ID_1_0_C ) + { + new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id]; + if(new_cmdbuf_node==NULL) + { + pr_err("hantrovcmd_isr error cmdbuf_id %d line %d!!\n",cmdbuf_id,__LINE__); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + else + { + exe_cmdbuf_busAddress = VCMDGetAddrRegisterValue((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR); + //find the buserr cmdbuf + while(1) + { + if(new_cmdbuf_node==NULL) + { + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr) <=exe_cmdbuf_busAddress)&&(((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr+cmdbuf_obj->cmdbuf_size) >exe_cmdbuf_busAddress)) ) &&(cmdbuf_obj->cmdbuf_run_done==0)) + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + } + base_cmdbuf_node = new_cmdbuf_node; + // this cmdbuf and cmdbufs prior to itself, run_done = 1 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done==0)) + { + cmdbuf_obj->cmdbuf_run_done=1; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_OK; + cmdbuf_processed_num++; + encoder_devfreq_record_idle( encoder_get_devfreq_priv_data() ); + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->previous; + } + new_cmdbuf_node = base_cmdbuf_node; + if(new_cmdbuf_node!=NULL) + { + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_BUSERR; + } + base_cmdbuf_node=base_cmdbuf_node->next; + vcmd_delink_cmdbuf(dev,base_cmdbuf_node); + vcmd_link_cmdbuf(dev,base_cmdbuf_node); + if(dev->sw_cmdbuf_rdy_num !=0) + { + //restart new command + vcmd_start(dev,base_cmdbuf_node); + } + 
trace_venc_interrupt(cmdbuf_id,irq_status,cmdbuf_processed_num); + spin_unlock_irqrestore(dev->spinlock, flags); + if(cmdbuf_processed_num) + wake_up_all(dev->wait_queue); + handled++; + return IRQ_HANDLED; + } + if(vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_TIMEOUT)) + { + //time out,need to reset + new_cmdbuf_node = dev->list_manager.head; + venc_vcmd_profile.vcmd_timeout_cnt++; + dev->working_state = WORKING_STATE_IDLE; + if(dev->hw_version_id > HW_ID_1_0_C ) + { + new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id]; + if(new_cmdbuf_node==NULL) + { + pr_err("hantrovcmd_isr error cmdbuf_id %d line %d!!\n",cmdbuf_id,__LINE__); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + else + { + exe_cmdbuf_busAddress = VCMDGetAddrRegisterValue((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR); + //find the timeout cmdbuf + while(1) + { + if(new_cmdbuf_node==NULL) + { + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr) <=exe_cmdbuf_busAddress)&&(((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr+cmdbuf_obj->cmdbuf_size) >exe_cmdbuf_busAddress)) ) &&(cmdbuf_obj->cmdbuf_run_done==0)) + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + } + base_cmdbuf_node = new_cmdbuf_node; + new_cmdbuf_node = new_cmdbuf_node->previous; + // this cmdbuf and cmdbufs prior to itself, run_done = 1 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done==0)) + { + cmdbuf_obj->cmdbuf_run_done=1; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_OK; + cmdbuf_processed_num++; + encoder_devfreq_record_idle( encoder_get_devfreq_priv_data() ); + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->previous; + } + vcmd_delink_cmdbuf(dev,base_cmdbuf_node); + vcmd_link_cmdbuf(dev,base_cmdbuf_node); + 
if(dev->sw_cmdbuf_rdy_num !=0) + { + //reset + vcmd_reset_current_asic(dev); + //restart new command + vcmd_start(dev,base_cmdbuf_node); + } + trace_venc_interrupt(cmdbuf_id,irq_status,cmdbuf_processed_num); + spin_unlock_irqrestore(dev->spinlock, flags); + if(cmdbuf_processed_num) + wake_up_all(dev->wait_queue); + handled++; + return IRQ_HANDLED; + } + if(vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_CMDERR)) + { + //command error,don't need to reset + new_cmdbuf_node = dev->list_manager.head; + dev->working_state = WORKING_STATE_IDLE; + venc_vcmd_profile.vcmd_cmderr_cnt++; + if(dev->hw_version_id > HW_ID_1_0_C ) + { + new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id]; + if(new_cmdbuf_node==NULL) + { + pr_err("hantrovcmd_isr error cmdbuf_id %d!!\n", cmdbuf_id); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + else + { + exe_cmdbuf_busAddress = VCMDGetAddrRegisterValue((const void *)dev->hwregs,dev->reg_mirror,HWIF_VCMD_EXECUTING_CMD_ADDR); + //find the cmderror cmdbuf + while(1) + { + if(new_cmdbuf_node==NULL) + { + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr) <=exe_cmdbuf_busAddress)&&(((cmdbuf_obj->cmdbuf_busAddress-base_ddr_addr+cmdbuf_obj->cmdbuf_size) >exe_cmdbuf_busAddress)) ) &&(cmdbuf_obj->cmdbuf_run_done==0)) + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + } + base_cmdbuf_node = new_cmdbuf_node; + // this cmdbuf and cmdbufs prior to itself, run_done = 1 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done==0)) + { + cmdbuf_obj->cmdbuf_run_done=1; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_OK; + cmdbuf_processed_num++; + encoder_devfreq_record_idle( encoder_get_devfreq_priv_data() ); + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->previous; + } + new_cmdbuf_node 
= base_cmdbuf_node; + if(new_cmdbuf_node!=NULL) + { + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_CMDERR;//cmderr + } + base_cmdbuf_node=base_cmdbuf_node->next; + vcmd_delink_cmdbuf(dev,base_cmdbuf_node); + vcmd_link_cmdbuf(dev,base_cmdbuf_node); + if(dev->sw_cmdbuf_rdy_num !=0) + { + //restart new command + vcmd_start(dev,base_cmdbuf_node); + } + trace_venc_interrupt(cmdbuf_id,irq_status,cmdbuf_processed_num); + spin_unlock_irqrestore(dev->spinlock, flags); + if(cmdbuf_processed_num) + wake_up_all(dev->wait_queue); + handled++; + return IRQ_HANDLED; + } + + if(vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_ENDCMD)) + { + //end command interrupt + new_cmdbuf_node = dev->list_manager.head; + dev->working_state = WORKING_STATE_IDLE; + if(dev->hw_version_id > HW_ID_1_0_C ) + { + new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id]; + if(new_cmdbuf_node==NULL) + { + pr_err("hantrovcmd_isr error cmdbuf_id %d line %d!!\n",cmdbuf_id,__LINE__); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + else + { + //find the end cmdbuf + while(1) + { + if(new_cmdbuf_node==NULL) + { + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->has_end_cmdbuf == 1)&&(cmdbuf_obj->cmdbuf_run_done==0)) + break; + new_cmdbuf_node = new_cmdbuf_node->next; + } + } + base_cmdbuf_node = new_cmdbuf_node; + // this cmdbuf and cmdbufs prior to itself, run_done = 1 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done==0)) + { + cmdbuf_obj->cmdbuf_run_done=1; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_OK; + cmdbuf_processed_num++; + encoder_devfreq_record_idle( encoder_get_devfreq_priv_data() ); + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->previous; + } + base_cmdbuf_node=base_cmdbuf_node->next; + 
vcmd_delink_cmdbuf(dev,base_cmdbuf_node); + vcmd_link_cmdbuf(dev,base_cmdbuf_node); + if(dev->sw_cmdbuf_rdy_num !=0) + { + //restart new command + vcmd_start(dev,base_cmdbuf_node); + } + trace_venc_interrupt(cmdbuf_id,irq_status,cmdbuf_processed_num); + spin_unlock_irqrestore(dev->spinlock, flags); + if(cmdbuf_processed_num) + wake_up_all(dev->wait_queue); + handled++; + return IRQ_HANDLED; + } + if(dev->hw_version_id <= HW_ID_1_0_C ) + cmdbuf_id = vcmd_get_register_mirror_value(dev->reg_mirror,HWIF_VCMD_IRQ_INTCMD); + if(cmdbuf_id) + { + if(dev->hw_version_id <= HW_ID_1_0_C ) + { + if(cmdbuf_id>=TOTAL_DISCRETE_CMDBUF_NUM) + { + pr_err("hantrovcmd_isr error cmdbuf_id greater than the ceiling !!\n"); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + } + new_cmdbuf_node = global_cmdbuf_node[cmdbuf_id]; + if(new_cmdbuf_node==NULL) + { + pr_err("hantrovcmd_isr error cmdbuf_id %d line %d!!\n",cmdbuf_id,__LINE__); + spin_unlock_irqrestore(dev->spinlock, flags); + return IRQ_HANDLED; + } + // interrupt cmdbuf and cmdbufs prior to itself, run_done = 1 + while(1) + { + if(new_cmdbuf_node==NULL) + break; + cmdbuf_obj = (struct cmdbuf_obj*)new_cmdbuf_node->data; + if((cmdbuf_obj->cmdbuf_run_done==0)) + { + cmdbuf_obj->cmdbuf_run_done=1; + cmdbuf_obj->executing_status = CMDBUF_EXE_STATUS_OK; + cmdbuf_processed_num++; + encoder_devfreq_record_idle( encoder_get_devfreq_priv_data() ); + } + else + break; + new_cmdbuf_node = new_cmdbuf_node->previous; + } + handled++; + } + venc_vcmd_profile.vcmd_num_share_irq = cmdbuf_processed_num; + trace_venc_interrupt(cmdbuf_id,irq_status,cmdbuf_processed_num); + spin_unlock_irqrestore(dev->spinlock, flags); + if(cmdbuf_processed_num) + wake_up_all(dev->wait_queue); + if(!handled) + { + PDEBUG("IRQ received, but not hantro's!\n"); + } + return IRQ_HANDLED; +} + +static void vcmd_reset_asic(struct hantrovcmd_dev * dev) +{ + int i,n; + u32 result; + for (n=0;nhwregs!=NULL) + { + //disable interrupt at first + 
vcmd_write_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_CTL_OFFSET,0x0000); + //reset all + vcmd_write_reg((const void *)dev->hwregs,VCMD_REGISTER_CONTROL_OFFSET,0x0002); + //read status register + result =vcmd_read_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_STATUS_OFFSET); + //clean status register + vcmd_write_reg((const void *)dev->hwregs,VCMD_REGISTER_INT_STATUS_OFFSET,result); + } + +} + +#ifdef VCMD_DEBUG_INTERNAL +static void printk_vcmd_register_debug(const void *hwregs, char * info) +{ + u32 i, fordebug; + for(i=0;i= vcmd_buf_mem_pool.busAddress && + (addr - vcmd_buf_mem_pool.busAddress + size) <= CMDBUF_POOL_TOTAL_SIZE) || + (addr >= vcmd_status_buf_mem_pool.busAddress && + (addr - vcmd_status_buf_mem_pool.busAddress + size) <= CMDBUF_POOL_TOTAL_SIZE) || + (addr >= vcmd_registers_mem_pool.busAddress && + (addr - vcmd_status_buf_mem_pool.busAddress + size) <= CMDBUF_VCMD_REGISTER_TOTAL_SIZE); + + return bInRange; +} diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregisterenum.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregisterenum.h new file mode 100644 index 00000000000000..cb9addf5f2c94e --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregisterenum.h @@ -0,0 +1,157 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or 
substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. 
+* +*****************************************************************************/ +/* Register interface based on the document version 1.1.2 */ + HWIF_VCMD_HW_ID, + HWIF_VCMD_HW_VERSION, + HWIF_VCMD_HW_BUILD_DATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_MMU, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_L2CACHE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_DEC400, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD, + HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE_MMU, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_MMU, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_L2CACHE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_DEC400, + HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_MMU, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_L2CACHE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_DEC400, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD, + HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE_MMU, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_MMU, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_L2CACHE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_DEC400, + HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE, + HWIF_VCMD_EXE_CMDBUF_COUNT, + HWIF_VCMD_EXECUTING_CMD, + HWIF_VCMD_EXECUTING_CMD_MSB, + HWIF_VCMD_AXI_TOTAL_AR_LEN, + HWIF_VCMD_AXI_TOTAL_R, + HWIF_VCMD_AXI_TOTAL_AR, + HWIF_VCMD_AXI_TOTAL_R_LAST, + HWIF_VCMD_AXI_TOTAL_AW_LEN, + HWIF_VCMD_AXI_TOTAL_W, + HWIF_VCMD_AXI_TOTAL_AW, + HWIF_VCMD_AXI_TOTAL_W_LAST, + HWIF_VCMD_AXI_TOTAL_B, + HWIF_VCMD_AXI_AR_VALID, + HWIF_VCMD_AXI_AR_READY, + HWIF_VCMD_AXI_R_VALID, + HWIF_VCMD_AXI_R_READY, + HWIF_VCMD_AXI_AW_VALID, + HWIF_VCMD_AXI_AW_READY, + HWIF_VCMD_AXI_W_VALID, + HWIF_VCMD_AXI_W_READY, + HWIF_VCMD_AXI_B_VALID, + HWIF_VCMD_AXI_B_READY, + HWIF_VCMD_WORK_STATE, + HWIF_VCMD_INIT_MODE, + HWIF_VCMD_AXI_CLK_GATE_DISABLE, + HWIF_VCMD_MASTER_OUT_CLK_GATE_DISABLE, + HWIF_VCMD_CORE_CLK_GATE_DISABLE, + HWIF_VCMD_ABORT_MODE, + HWIF_VCMD_RESET_CORE, + HWIF_VCMD_RESET_ALL, + HWIF_VCMD_START_TRIGGER, + HWIF_VCMD_IRQ_INTCMD, + HWIF_VCMD_IRQ_JMPP, + HWIF_VCMD_IRQ_JMPD, + HWIF_VCMD_IRQ_RESET, + HWIF_VCMD_IRQ_ABORT, + HWIF_VCMD_IRQ_CMDERR, + HWIF_VCMD_IRQ_TIMEOUT, + 
HWIF_VCMD_IRQ_BUSERR, + HWIF_VCMD_IRQ_ENDCMD, + HWIF_VCMD_IRQ_INTCMD_EN, + HWIF_VCMD_IRQ_JMPP_EN, + HWIF_VCMD_IRQ_JMPD_EN, + HWIF_VCMD_IRQ_RESET_EN, + HWIF_VCMD_IRQ_ABORT_EN, + HWIF_VCMD_IRQ_CMDERR_EN, + HWIF_VCMD_IRQ_TIMEOUT_EN, + HWIF_VCMD_IRQ_BUSERR_EN, + HWIF_VCMD_IRQ_ENDCMD_EN, + HWIF_VCMD_TIMEOUT_EN, + HWIF_VCMD_TIMEOUT_CYCLES, + HWIF_VCMD_EXECUTING_CMD_ADDR, + HWIF_VCMD_EXECUTING_CMD_ADDR_MSB, + HWIF_VCMD_EXE_CMDBUF_LENGTH, + HWIF_VCMD_CMD_SWAP, + HWIF_VCMD_MAX_BURST_LEN, + HWIF_VCMD_AXI_ID_RD, + HWIF_VCMD_AXI_ID_WR, + HWIF_VCMD_RDY_CMDBUF_COUNT, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_MMU_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_L2CACHE_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_DEC400_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCD_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE_MMU_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_MMU_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_L2CACHE_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_DEC400_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE_GATE, + HWIF_VCMD_EXT_ABN_INT_SRC_VCE_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_MMU_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_L2CACHE_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_DEC400_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCD_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE_MMU_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_MMU_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_L2CACHE_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_DEC400_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE_GATE, + HWIF_VCMD_EXT_NORM_INT_SRC_VCE_GATE, + HWIF_VCMD_CMDBUF_EXECUTING_ID, diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregistertable.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregistertable.h new file mode 100644 index 00000000000000..4629fcdc1b5485 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregistertable.h @@ -0,0 +1,157 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free 
of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ +/* Register interface based on the document version 1.1.2 */ + VCMDREG(HWIF_VCMD_HW_ID , 0 ,0xffff0000, 16, 0,RO,"HW ID"), + VCMDREG(HWIF_VCMD_HW_VERSION , 0 ,0x0000ffff, 0, 0,RO,"version of hw(1.0.0).[15:12]-major [11:8]-minor [7:0]-build"), + VCMDREG(HWIF_VCMD_HW_BUILD_DATE , 4 ,0xffffffff, 0, 0,RO,"Hw package generation date in BCD code"), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_MMU , 8 ,0x08000000, 27, 0,RO,"external abnormal interrupt source from mmu of vcd."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_L2CACHE , 8 ,0x04000000, 26, 0,RO,"external abnormal interrupt source from l2cache of vcd."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_DEC400 , 8 ,0x02000000, 25, 0,RO,"external abnormal interrupt source from dec400 of vcd."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD , 8 ,0x01000000, 24, 0,RO,"external abnormal interrupt source from vcd."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE_MMU , 8 ,0x00200000, 21, 0,RO,"external abnormal interrupt source from mmu of cutree."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_MMU , 8 ,0x00100000, 20, 0,RO,"external abnormal interrupt source from mmu of vce."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_L2CACHE , 8 ,0x00080000, 19, 0,RO,"external abnormal interrupt source from l2 cache of vce."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_DEC400 , 8 ,0x00040000, 18, 0,RO,"external abnormal interrupt source from dec400 of vce."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE , 8 ,0x00020000, 17, 0,RO,"external abnormal interrupt source from cutree."), + 
VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE , 8 ,0x00010000, 16, 0,RO,"external abnormal interrupt source from vce."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_MMU , 8 ,0x00000800, 11, 0,RO,"external normal interrupt source from mmu of vcd."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_L2CACHE , 8 ,0x00000400, 10, 0,RO,"external normal interrupt source from l2cache of vcd."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_DEC400 , 8 ,0x00000200, 9, 0,RO,"external normal interrupt source from dec400 of vcd."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD , 8 ,0x00000100, 8, 0,RO,"external normal interrupt source from vcd."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE_MMU , 8 ,0x00000020, 5, 0,RO,"external normal interrupt source from mmu of cutree."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_MMU , 8 ,0x00000010, 4, 0,RO,"external normal interrupt source from mmu of vce."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_L2CACHE , 8 ,0x00000008, 3, 0,RO,"external normal interrupt source from l2 cache of vce."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_DEC400 , 8 ,0x00000004, 2, 0,RO,"external normal interrupt source from dec400 of vce."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE , 8 ,0x00000002, 1, 0,RO,"external normal interrupt source from cutree."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE , 8 ,0x00000001, 0, 0,RO,"external normal interrupt source from vce."), + VCMDREG(HWIF_VCMD_EXE_CMDBUF_COUNT , 12 ,0xffffffff, 0, 0,RO,"Hw increases this counter by 1 after one more command buffer has been executed"), + VCMDREG(HWIF_VCMD_EXECUTING_CMD , 16 ,0xffffffff, 0, 0,RO,"the first 32 bits of the executing cmd."), + VCMDREG(HWIF_VCMD_EXECUTING_CMD_MSB , 20 ,0xffffffff, 0, 0,RO,"the second 32 bits of the executing cmd."), + VCMDREG(HWIF_VCMD_AXI_TOTAL_AR_LEN , 24 ,0xffffffff, 0, 0,RO,"axi total ar length"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_R , 28 ,0xffffffff, 0, 0,RO,"axi total r"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_AR , 32 ,0xffffffff, 0, 0,RO,"axi total ar"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_R_LAST , 
36 ,0xffffffff, 0, 0,RO,"axi total r last"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_AW_LEN , 40 ,0xffffffff, 0, 0,RO,"axi total aw length"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_W , 44 ,0xffffffff, 0, 0,RO,"axi total w"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_AW , 48 ,0xffffffff, 0, 0,RO,"axi total aw"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_W_LAST , 52 ,0xffffffff, 0, 0,RO,"axi total w last"), + VCMDREG(HWIF_VCMD_AXI_TOTAL_B , 56 ,0xffffffff, 0, 0,RO,"axi total b"), + VCMDREG(HWIF_VCMD_AXI_AR_VALID , 60 ,0x80000000, 31, 0,RO,"axi ar valid"), + VCMDREG(HWIF_VCMD_AXI_AR_READY , 60 ,0x40000000, 30, 0,RO,"axi ar ready"), + VCMDREG(HWIF_VCMD_AXI_R_VALID , 60 ,0x20000000, 29, 0,RO,"axi r valid"), + VCMDREG(HWIF_VCMD_AXI_R_READY , 60 ,0x10000000, 28, 0,RO,"axi r ready"), + VCMDREG(HWIF_VCMD_AXI_AW_VALID , 60 ,0x08000000, 27, 0,RO,"axi aw valid"), + VCMDREG(HWIF_VCMD_AXI_AW_READY , 60 ,0x04000000, 26, 0,RO,"axi aw ready"), + VCMDREG(HWIF_VCMD_AXI_W_VALID , 60 ,0x02000000, 25, 0,RO,"axi w valid"), + VCMDREG(HWIF_VCMD_AXI_W_READY , 60 ,0x01000000, 24, 0,RO,"axi w ready"), + VCMDREG(HWIF_VCMD_AXI_B_VALID , 60 ,0x00800000, 23, 0,RO,"axi b valid"), + VCMDREG(HWIF_VCMD_AXI_B_READY , 60 ,0x00400000, 22, 0,RO,"axi b ready"), + VCMDREG(HWIF_VCMD_WORK_STATE , 60 ,0x00000007, 0, 0,RO,"hw work state. 
0-IDLE 1-WORK 2-STALL 3-PEND 4-ABORT"), + VCMDREG(HWIF_VCMD_INIT_MODE , 64 ,0x00000080, 7, 0,RW,"After executed a END command in init mode, VCMD will get back to normal mode"), + VCMDREG(HWIF_VCMD_AXI_CLK_GATE_DISABLE , 64 ,0x00000040, 6, 0,RW,"keep axi_clk always on when this bit is set to 1"), + VCMDREG(HWIF_VCMD_MASTER_OUT_CLK_GATE_DISABLE , 64 ,0x00000020, 5, 0,RW,"keep master_out_clk(APB/AHB master) always on when this bit is set to 1"), + VCMDREG(HWIF_VCMD_CORE_CLK_GATE_DISABLE , 64 ,0x00000010, 4, 0,RW,"keep core_clk always on when this bit is set to 1"), + VCMDREG(HWIF_VCMD_ABORT_MODE , 64 ,0x00000008, 3, 0,RW,"0:abort after finishing current cmdbuf command.1:abort immediately "), + VCMDREG(HWIF_VCMD_RESET_CORE , 64 ,0x00000004, 2, 0,RW,"sw write 1 to this bit will rset HW core logic when AXI/APB bus is idle."), + VCMDREG(HWIF_VCMD_RESET_ALL , 64 ,0x00000002, 1, 0,RW,"sw write 1 to this bit will rset HW immediately including all swregs and AXI/APB bus logic"), + VCMDREG(HWIF_VCMD_START_TRIGGER , 64 ,0x00000001, 0, 0,RW,"0:abort previou task and stop hw. 1:trigger hw to fetch and execute commands."), + VCMDREG(HWIF_VCMD_IRQ_INTCMD , 68 ,0xffff0000, 16, 0,RW,"interrupt sources which are triggered by command buffer id.. 
Only for version 1.0.c"), + VCMDREG(HWIF_VCMD_IRQ_JMPP , 68 ,0x00000080, 7, 0,RW,"interrupt source which is triggered by JMP command when hw goes to PEND state."), + VCMDREG(HWIF_VCMD_IRQ_JMPD , 68 ,0x00000040, 6, 0,RW,"interrupt source which is triggered by JMP command directly."), + VCMDREG(HWIF_VCMD_IRQ_RESET , 68 ,0x00000020, 5, 0,RW,"interrupt source which is triggered by hw reset or sw_vcmd_reset_all."), + VCMDREG(HWIF_VCMD_IRQ_ABORT , 68 ,0x00000010, 4, 0,RW,"interrupt source which is triggered by abort operation."), + VCMDREG(HWIF_VCMD_IRQ_CMDERR , 68 ,0x00000008, 3, 0,RW,"interrupt source which is triggered when there is illegal command in cmdbuf"), + VCMDREG(HWIF_VCMD_IRQ_TIMEOUT , 68 ,0x00000004, 2, 0,RW,"interrupt source which is triggered when vcmd timeout."), + VCMDREG(HWIF_VCMD_IRQ_BUSERR , 68 ,0x00000002, 1, 0,RW,"interrupt source which is triggered when there is bus error."), + VCMDREG(HWIF_VCMD_IRQ_ENDCMD , 68 ,0x00000001, 0, 0,RW,"interrupt source which is triggered by END command."), + VCMDREG(HWIF_VCMD_IRQ_INTCMD_EN , 72 ,0xffff0000, 16, 0,RW,"interrupt sources which are triggered by command buffer id. 
Only for version 1.0.c"), + VCMDREG(HWIF_VCMD_IRQ_JMPP_EN , 72 ,0x00000080, 7, 0,RW,"interrupt enable for sw_vcmd_irq_jmpp"), + VCMDREG(HWIF_VCMD_IRQ_JMPD_EN , 72 ,0x00000040, 6, 0,RW,"interrupt enable for sw_vcmd_irq_jmpd"), + VCMDREG(HWIF_VCMD_IRQ_RESET_EN , 72 ,0x00000020, 5, 0,RW,"interrupt enable for sw_vcmd_irq_reset"), + VCMDREG(HWIF_VCMD_IRQ_ABORT_EN , 72 ,0x00000010, 4, 0,RW,"interrupt enable for sw_vcmd_irq_abort"), + VCMDREG(HWIF_VCMD_IRQ_CMDERR_EN , 72 ,0x00000008, 3, 0,RW,"interrupt enable for sw_vcmd_irq_cmderr"), + VCMDREG(HWIF_VCMD_IRQ_TIMEOUT_EN , 72 ,0x00000004, 2, 0,RW,"interrupt enable for sw_vcmd_irq_timeout"), + VCMDREG(HWIF_VCMD_IRQ_BUSERR_EN , 72 ,0x00000002, 1, 0,RW,"interrupt enable for sw_vcmd_irq_buserr"), + VCMDREG(HWIF_VCMD_IRQ_ENDCMD_EN , 72 ,0x00000001, 0, 0,RW,"interrupt enable for sw_vcmd_irq_endcmd"), + VCMDREG(HWIF_VCMD_TIMEOUT_EN , 76 ,0x80000000, 31, 0,RW,"1:timeout work. 0: timeout do not work"), + VCMDREG(HWIF_VCMD_TIMEOUT_CYCLES , 76 ,0x7fffffff, 0, 0,RW,"sw_vcmd_irq_timeout will be generated when timeout counter is equal to this value."), + VCMDREG(HWIF_VCMD_EXECUTING_CMD_ADDR , 80 ,0xffffffff, 0, 0,RW,"the least 32 bits address of the executing command"), + VCMDREG(HWIF_VCMD_EXECUTING_CMD_ADDR_MSB , 84 ,0xffffffff, 0, 0,RW,"the most 32 bits address of the executing command"), + VCMDREG(HWIF_VCMD_EXE_CMDBUF_LENGTH , 88 ,0x0000ffff, 0, 0,RW,"the length of current command buffer in unit of 64bits."), + VCMDREG(HWIF_VCMD_CMD_SWAP , 92 ,0xf0000000, 28, 0,RW,"axi data swapping"), + VCMDREG(HWIF_VCMD_MAX_BURST_LEN , 92 ,0x00ff0000, 16, 0,RW,"max burst length which will be sent to axi bus"), + VCMDREG(HWIF_VCMD_AXI_ID_RD , 92 ,0x0000ff00, 8, 0,RW,"the arid which will be used on axi bus reading"), + VCMDREG(HWIF_VCMD_AXI_ID_WR , 92 ,0x000000ff, 0, 0,RW,"the awid which will be used on axi bus writing"), + VCMDREG(HWIF_VCMD_RDY_CMDBUF_COUNT , 96 ,0xffffffff, 0, 0,RW,"sw increases this counter by 1 after one more command buffer was 
ready."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_MMU_GATE , 100,0x10000000, 28, 0,RW,"external abnormal interrupt source from mmu of vcd gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_L2CACHE_GATE, 100,0x08000000, 27, 0,RW,"external abnormal interrupt source from l2cache of vcd gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_DEC400_GATE, 100,0x04000000, 26, 0,RW,"external abnormal interrupt source from dec400 of vcd gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCD_GATE , 100,0x01000000, 24, 0,RW,"external abnormal interrupt source from vcd gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE_MMU_GATE, 100,0x00200000, 21, 0,RW,"external abnormal interrupt source from mmu of cutree gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_MMU_GATE , 100,0x00100000, 20, 0,RW,"external abnormal interrupt source from mmu of vce gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_L2CACHE_GATE, 100,0x00080000, 19, 0,RW,"external abnormal interrupt source from l2 cache of vce gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_DEC400_GATE, 100,0x00040000, 18, 0,RW,"external abnormal interrupt source from dec400 of vce gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_CUTREE_GATE , 100,0x00020000, 17, 0,RW,"external abnormal interrupt source from cutree gate."), + VCMDREG(HWIF_VCMD_EXT_ABN_INT_SRC_VCE_GATE , 100,0x00010000, 16, 0,RW,"external abnormal interrupt source from vce gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_MMU_GATE , 100,0x00000800, 11, 0,RW,"external normal interrupt source from mmu of vcd gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_L2CACHE_GATE, 100,0x00000400, 10, 0,RW,"external normal interrupt source from l2cache of vcd gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_DEC400_GATE, 100,0x00000200, 9, 0,RW,"external normal interrupt source from dec400 of vcd gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCD_GATE , 100,0x00000100, 8, 0,RW,"external normal interrupt source from vcd gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE_MMU_GATE, 100,0x00000020, 5, 
0,RW,"external normal interrupt source from mmu of cutree gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_MMU_GATE , 100,0x00000010, 4, 0,RW,"external normal interrupt source from mmu of vce gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_L2CACHE_GATE, 100,0x00000008, 3, 0,RW,"external normal interrupt source from l2 cache of vce gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_DEC400_GATE, 100,0x00000004, 2, 0,RW,"external normal interrupt source from dec400 of vce gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_CUTREE_GATE , 100,0x00000002, 1, 0,RW,"external normal interrupt source from cutree gate."), + VCMDREG(HWIF_VCMD_EXT_NORM_INT_SRC_VCE_GATE , 100,0x00000001, 0, 0,RW,"external normal interrupt source from vce gate."), + VCMDREG(HWIF_VCMD_CMDBUF_EXECUTING_ID , 104,0xffffffff, 0, 0,RW,"The ID of current executing command buffer.used after version 1.1.2."), diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.c b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.c new file mode 100644 index 00000000000000..da89851cd8359c --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.c @@ -0,0 +1,180 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ +/*------------------------------------------------------------------------------ + + Table of contents + + 1. Include headers + 2. 
External compiler flags + 3. Module defines + +------------------------------------------------------------------------------*/ +/*------------------------------------------------------------------------------ + 1. Include headers +------------------------------------------------------------------------------*/ + +#include +#include "vcmdswhwregisters.h" + +/* NOTE: Don't use ',' in descriptions, because it is used as separator in csv + * parsing. */ +const regVcmdField_s asicVcmdRegisterDesc[] = +{ +#include "vcmdregistertable.h" +}; + +/*------------------------------------------------------------------------------ + 2. External compiler flags +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- + 3. Module defines +------------------------------------------------------------------------------*/ + +/* Define this to print debug info for every register write. +#define DEBUG_PRINT_REGS */ + +/******************************************************************************* + Function name : vcmd_read_reg + Description : Retrive the content of a hadware register + Note: The status register will be read after every MB + so it may be needed to buffer it's content if reading + the HW register is slow. 
+ Return type : u32 + Argument : u32 offset +*******************************************************************************/ +u32 vcmd_read_reg(const void *hwregs, u32 offset) +{ + u32 val; + + val =(u32) ioread32((void*)hwregs + offset); + + PDEBUG("vcmd_read_reg 0x%02x --> %08x\n", offset, val); + + return val; +} + +/******************************************************************************* + Function name : vcmd_write_reg + Description : Set the content of a hadware register + Return type : void + Argument : u32 offset + Argument : u32 val +*******************************************************************************/ +void vcmd_write_reg(const void *hwregs, u32 offset, u32 val) +{ + iowrite32(val,(void*)hwregs + offset); + + PDEBUG("vcmd_write_reg 0x%02x with value %08x\n", offset, val); +} + + +/*------------------------------------------------------------------------------ + + vcmd_write_register_value + + Write a value into a defined register field (write will happens actually). 
+ +------------------------------------------------------------------------------*/ +void vcmd_write_register_value(const void *hwregs,u32* reg_mirror,regVcmdName name, u32 value) +{ + const regVcmdField_s *field; + u32 regVal; + + field = &asicVcmdRegisterDesc[name]; + +#ifdef DEBUG_PRINT_REGS + PDEBUG("vcmd_write_register_value 0x%2x 0x%08x Value: %10d %s\n", + field->base, field->mask, value, field->description); +#endif + + /* Check that value fits in field */ + PDEBUG("field->name == name=%d\n",field->name == name); + PDEBUG("((field->mask >> field->lsb) << field->lsb) == field->mask=%d\n",((field->mask >> field->lsb) << field->lsb) == field->mask); + PDEBUG("(field->mask >> field->lsb) >= value=%d\n",(field->mask >> field->lsb) >= value); + PDEBUG("field->base < ASIC_VCMD_SWREG_AMOUNT*4=%d\n",field->base < ASIC_VCMD_SWREG_AMOUNT*4); + + /* Clear previous value of field in register */ + regVal = reg_mirror[field->base/4] & ~(field->mask); + + /* Put new value of field in register */ + reg_mirror[field->base/4] = regVal | ((value << field->lsb) & field->mask); + + /* write it into HW registers */ + vcmd_write_reg(hwregs, field->base,reg_mirror[field->base/4]); +} + +/*------------------------------------------------------------------------------ + + vcmd_get_register_value + + Get an unsigned value from the ASIC registers + +------------------------------------------------------------------------------*/ +u32 vcmd_get_register_value(const void *hwregs, u32* reg_mirror,regVcmdName name) +{ + const regVcmdField_s *field; + u32 value; + + field = &asicVcmdRegisterDesc[name]; + + PDEBUG("field->base < ASIC_VCMD_SWREG_AMOUNT * 4=%d\n",field->base < ASIC_VCMD_SWREG_AMOUNT * 4); + + value = reg_mirror[field->base / 4] = vcmd_read_reg(hwregs, field->base); + value = (value & field->mask) >> field->lsb; + + return value; +} + + diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.h 
b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.h new file mode 100644 index 00000000000000..0f57199944fb46 --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.h @@ -0,0 +1,244 @@ +/**************************************************************************** +* +* The MIT License (MIT) +* +* Copyright (c) 2014 - 2021 VERISILICON +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +***************************************************************************** +* +* The GPL License (GPL) +* +* Copyright (C) 2014 - 2021 VERISILICON +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version 2 +* of the License, or (at your option) any later version. 
+* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software Foundation, +* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +* +***************************************************************************** +* +* Note: This software is released under dual MIT and GPL licenses. A +* recipient may use this file under the terms of either the MIT license or +* GPL License. If you wish to use only one license not the other, you can +* indicate your decision by deleting one of the above license notices in your +* version of this file. +* +*****************************************************************************/ + +/*------------------------------------------------------------------------------ + + Table of contents + + 1. Include headers + 2. External compiler flags + 3. Module defines + +------------------------------------------------------------------------------*/ +#ifndef VCMD_SWHWREGISTERS_H +#define VCMD_SWHWREGISTERS_H + +#ifdef __cplusplus +extern "C" { +#endif +/*------------------------------------------------------------------------------ + 1. 
Include headers +------------------------------------------------------------------------------*/ +#ifdef __FREERTOS__ +#include "base_type.h" +#include "io_tools.h" +#elif defined(__linux__) +#include +#include +#include +#endif + +#ifdef __FREERTOS__ +//ptr_t has been defined in base_type.h //Now the FreeRTOS mem need to support 64bit env +#elif defined(__linux__) +typedef int i32; +typedef size_t ptr_t; +#endif + +#undef PDEBUG /* undef it, just in case */ +#ifdef REGISTER_DEBUG +# ifdef __KERNEL__ + /* This one if debugging is on, and kernel space */ +# define PDEBUG(fmt, args...) printk( KERN_INFO "memalloc: " fmt, ## args) +# else + /* This one for user space */ +# define PDEBUG(fmt, args...) fprintf(stderr, fmt, ## args) +# endif +#else +# define PDEBUG(fmt, args...) /* not debugging: nothing */ +#endif + +/*------------------------------------------------------------------------------ + 2. External compiler flags +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- + 3. 
Module defines +------------------------------------------------------------------------------*/ +#ifdef HANTROVCMD_ENABLE_IP_SUPPORT +#define VCMD_REGISTER_INDEX_SW_INIT_CMD0 32 +#define ASIC_VCMD_SWREG_AMOUNT 64 +#else +#define ASIC_VCMD_SWREG_AMOUNT 27 +#endif +#define VCMD_REGISTER_CONTROL_OFFSET 0X40 +#define VCMD_REGISTER_INT_STATUS_OFFSET 0X44 +#define VCMD_REGISTER_INT_CTL_OFFSET 0X48 +#define VCMD_REGISTER_EXT_INT_GATE_OFFSET 0X64 +/* HW Register field names */ +typedef enum +{ +#include "vcmdregisterenum.h" + VcmdRegisterAmount +} regVcmdName; + +/* HW Register field descriptions */ +typedef struct +{ + u32 name; /* Register name and index */ + i32 base; /* Register base address */ + u32 mask; /* Bitmask for this field */ + i32 lsb; /* LSB for this field [31..0] */ + i32 trace; /* Enable/disable writing in swreg_params.trc */ + i32 rw; /* 1=Read-only 2=Write-only 3=Read-Write */ + char *description; /* Field description */ +} regVcmdField_s; + +/* Flags for read-only, write-only and read-write */ +#define RO 1 +#define WO 2 +#define RW 3 + +#define REGBASE(reg) (asicVcmdRegisterDesc[reg].base) + +/* Description field only needed for system model build. */ +#ifdef TEST_DATA +#define VCMDREG(name, base, mask, lsb, trace, rw, desc) \ + {name, base, mask, lsb, trace, rw, desc} +#else +#define VCMDREG(name, base, mask, lsb, trace, rw, desc) \ + {name, base, mask, lsb, trace, rw, ""} +#endif + + +/*------------------------------------------------------------------------------ + 4. 
Function prototypes +------------------------------------------------------------------------------*/ +extern const regVcmdField_s asicVcmdRegisterDesc[]; + +/*------------------------------------------------------------------------------ + + EncAsicSetRegisterValue + + Set a value into a defined register field + +------------------------------------------------------------------------------*/ +static inline void vcmd_set_register_mirror_value(u32 *reg_mirror, regVcmdName name, u32 value) +{ + const regVcmdField_s *field; + u32 regVal; + + field = &asicVcmdRegisterDesc[name]; + +#ifdef DEBUG_PRINT_REGS + printf("vcmd_set_register_mirror_value 0x%2x 0x%08x Value: %10d %s\n", + field->base, field->mask, value, field->description); +#endif + + /* Check that value fits in field */ + PDEBUG("field->name == name=%d\n",field->name == name); + PDEBUG("((field->mask >> field->lsb) << field->lsb) == field->mask=%d\n",((field->mask >> field->lsb) << field->lsb) == field->mask); + PDEBUG("(field->mask >> field->lsb) >= value=%d\n",(field->mask >> field->lsb) >= value); + PDEBUG("field->base < ASIC_VCMD_SWREG_AMOUNT * 4=%d\n",field->base < ASIC_VCMD_SWREG_AMOUNT * 4); + + /* Clear previous value of field in register */ + regVal = reg_mirror[field->base / 4] & ~(field->mask); + + /* Put new value of field in register */ + reg_mirror[field->base / 4] = regVal | ((value << field->lsb) & field->mask); +} +static inline u32 vcmd_get_register_mirror_value(u32 *reg_mirror, regVcmdName name) +{ + const regVcmdField_s *field; + u32 regVal; + + field = &asicVcmdRegisterDesc[name]; + + + /* Check that value fits in field */ + PDEBUG("field->name == name=%d\n",field->name == name); + PDEBUG("((field->mask >> field->lsb) << field->lsb) == field->mask=%d\n",((field->mask >> field->lsb) << field->lsb) == field->mask); + PDEBUG("field->base < ASIC_VCMD_SWREG_AMOUNT * 4=%d\n",field->base < ASIC_VCMD_SWREG_AMOUNT * 4); + + regVal = reg_mirror[field->base / 4]; + regVal = (regVal & field->mask) 
>> field->lsb; + +#ifdef DEBUG_PRINT_REGS + PDEBUG("vcmd_get_register_mirror_value 0x%2x 0x%08x Value: %10d %s\n", + field->base, field->mask, regVal, field->description); +#endif + return regVal; +} + +u32 vcmd_read_reg(const void *hwregs, u32 offset); + +void vcmd_write_reg(const void *hwregs, u32 offset, u32 val); + + + +void vcmd_write_register_value(const void *hwregs,u32* reg_mirror,regVcmdName name, u32 value); + + +u32 vcmd_get_register_value(const void *hwregs, u32* reg_mirror,regVcmdName name); + +#define vcmd_set_addr_register_value(reg_base, reg_mirror, name, value) do {\ + if(sizeof(ptr_t) == 8) {\ + vcmd_write_register_value((reg_base), (reg_mirror),name, (u32)((ptr_t)value)); \ + vcmd_write_register_value((reg_base), (reg_mirror),name##_MSB, (u32)(((ptr_t)value) >> 32));\ + } else {\ + vcmd_write_register_value((reg_base),(reg_mirror), name, (u32)((ptr_t)value));\ + }\ +}while (0) + +#define VCMDGetAddrRegisterValue(reg_base, reg_mirror,name) \ + ((sizeof(ptr_t) == 8) ? (\ + (((ptr_t)vcmd_get_register_value((reg_base),(reg_mirror), name)) | \ + (((ptr_t)vcmd_get_register_value((reg_base), (reg_mirror),name##_MSB)) << 32))\ + ) : ((ptr_t)vcmd_get_register_value((reg_base),(reg_mirror), (name)))) + +#ifdef __cplusplus +} +#endif + +#endif /* VCMD_SWHWREGISTERS_H */ \ No newline at end of file diff --git a/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/venc_trace_point.h b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/venc_trace_point.h new file mode 100644 index 00000000000000..df75dff7bd486e --- /dev/null +++ b/drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/venc_trace_point.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM venc_trace_point + +#if !defined(_TRACE_VENC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_VENC_H + +#include +#include +#include + +TRACE_EVENT(venc_interrupt, + + TP_PROTO(unsigned int complete_cmd, unsigned int irq_status, unsigned 
int processed_vcmd_num), + + TP_ARGS(complete_cmd, irq_status,processed_vcmd_num), + + TP_STRUCT__entry( + __field( unsigned int, complete_cmd ) + __field( unsigned int, irq_status) + __field( unsigned int, processed_vcmd_num) + ), + + TP_fast_assign( + __entry->complete_cmd = complete_cmd; + __entry->irq_status = irq_status; + __entry->processed_vcmd_num = processed_vcmd_num; + ), + + TP_printk("venc irq type complete_cmd %u irq status =%x processed_vcmd_num %d ", __entry->complete_cmd, __entry->irq_status,__entry->processed_vcmd_num) +); + +#endif /* _TRACE_VENC_H */ + +/* This part must be outside protection */ +#include \ No newline at end of file diff --git a/include/trace/events/venc_trace_point.h b/include/trace/events/venc_trace_point.h new file mode 100644 index 00000000000000..df75dff7bd486e --- /dev/null +++ b/include/trace/events/venc_trace_point.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM venc_trace_point + +#if !defined(_TRACE_VENC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_VENC_H + +#include +#include +#include + +TRACE_EVENT(venc_interrupt, + + TP_PROTO(unsigned int complete_cmd, unsigned int irq_status, unsigned int processed_vcmd_num), + + TP_ARGS(complete_cmd, irq_status,processed_vcmd_num), + + TP_STRUCT__entry( + __field( unsigned int, complete_cmd ) + __field( unsigned int, irq_status) + __field( unsigned int, processed_vcmd_num) + ), + + TP_fast_assign( + __entry->complete_cmd = complete_cmd; + __entry->irq_status = irq_status; + __entry->processed_vcmd_num = processed_vcmd_num; + ), + + TP_printk("venc irq type complete_cmd %u irq status =%x processed_vcmd_num %d ", __entry->complete_cmd, __entry->irq_status,__entry->processed_vcmd_num) +); + +#endif /* _TRACE_VENC_H */ + +/* This part must be outside protection */ +#include \ No newline at end of file