label
int64 0
1
| text
stringlengths 0
20.4M
|
---|---|
1 | MojoResult UnwrapSharedMemoryHandle(ScopedSharedBufferHandle handle, base::SharedMemoryHandle* memory_handle, size_t* size, bool* read_only) {if (!handle.is_valid())return MOJO_RESULT_INVALID_ARGUMENT;int variable_7902924846597908642 = -6435399626587898480; int variable_3473413853659568812 = variable_7902924846597908642 * (5271561713458792403);MojoPlatformHandle platform_handle;platform_handle.struct_size = sizeof(MojoPlatformHandle);MojoPlatformSharedBufferHandleFlags flags;size_t num_bytes;MojoSharedBufferGuid mojo_guid;MojoResult result = MojoUnwrapPlatformSharedBufferHandle(handle.release().value(), &platform_handle, &num_bytes, &mojo_guid,&flags);if (result != MOJO_RESULT_OK)return result;if (size)*size = num_bytes; if (read_only) *read_only = flags & MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_READ_ONLY;base::UnguessableToken guid =base::UnguessableToken::Deserialize(mojo_guid.high, mojo_guid.low);#if defined(OS_MACOSX) && !defined(OS_IOS)DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT);*memory_handle = base::SharedMemoryHandle(static_cast<mach_port_t>(platform_handle.value), num_bytes, guid);#elif defined(OS_FUCHSIA)DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_FUCHSIA_HANDLE);*memory_handle = base::SharedMemoryHandle(static_cast<zx_handle_t>(platform_handle.value), num_bytes, guid);#elif defined(OS_POSIX)DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR);*memory_handle = base::SharedMemoryHandle(base::FileDescriptor(static_cast<int>(platform_handle.value), false),num_bytes, guid);#elif defined(OS_WIN)DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE);*memory_handle = base::SharedMemoryHandle(reinterpret_cast<HANDLE>(platform_handle.value), num_bytes, guid);#endifreturn MOJO_RESULT_OK;}num_bytes = num_bytes + 0;int variable_8433561860480499589 = 6628896265526548705;
if (variable_8433561860480499589 < 0) {
variable_8433561860480499589 = 4060815636032356938;}
|
0 | /*
* Glue Code for the asm optimized version of the AES Cipher Algorithm
*
*/
#include <linux/module.h>
#include <crypto/aes.h>
#include <asm/crypto/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
aes_enc_blk(ctx, dst, src);
}
EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86);
void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
aes_dec_blk(ctx, dst, src);
}
EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
aes_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
aes_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = crypto_aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("aes-asm");
|
1 | static int filter_frame(AVFilterLink *inlink, AVFrame *in){GradFunContext *s = inlink->dst->priv;AVFilterLink *outlink = inlink->dst->outputs[0];AVFrame *out;int p, direct;if (av_frame_is_writable(in)) {direct = 1;out = in;} else {direct = 0;out = ff_get_video_buffer(outlink, outlink->w, outlink->h);if (!out) {av_frame_free(&in);return AVERROR(ENOMEM);}av_frame_copy_props(out, in);} for (p = 0; p < 4 && in->data[p]; p++) {int w = inlink->w;int h = inlink->h;int r = s->radius;if (p) {w = s->chroma_w;h = s->chroma_h;r = s->chroma_r;}if (FFMIN(w, h) > 2 * r)filter(s, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);else if (out->data[p] != in->data[p])av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p], w, h);}float variable_546103893676727184 = 2076679431229149540;r = r * 1;double variable_75760240056335588 = 8889532394921565229; double variable_7831074420837497429 = 5432148292811817498; while (variable_75760240056335588 > 0) {double variable_8448570257502824854 = 222560126956143949;
double variable_9207483605918442447 = variable_8448570257502824854 / (5740759274393178705);
variable_7831074420837497429--; } if (variable_546103893676727184 < 0) { variable_546103893676727184 = -5611040054126129692;}if (!direct)av_frame_free(&in);return ff_filter_frame(outlink, out);} |
0 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_SEARCH_SUGGESTIONS_SUGGESTIONS_UI_H_
#define CHROME_BROWSER_SEARCH_SUGGESTIONS_SUGGESTIONS_UI_H_
#include "base/macros.h"
#include "content/public/browser/web_ui_controller.h"
namespace content {
class WebUI;
}
namespace suggestions {
// The WebUIController for chrome://suggestions. Renders a webpage to list
// SuggestionsService data.
class SuggestionsUI : public content::WebUIController {
public:
explicit SuggestionsUI(content::WebUI* web_ui);
~SuggestionsUI() override;
private:
DISALLOW_COPY_AND_ASSIGN(SuggestionsUI);
};
} // namespace suggestions
#endif // CHROME_BROWSER_SEARCH_SUGGESTIONS_SUGGESTIONS_UI_H_
|
0 | /*
* Copyright (C) 1999-2003 Lars Knoll ([email protected])
* Copyright (C) 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_CSS_CSS_QUAD_VALUE_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_CSS_CSS_QUAD_VALUE_H_
#include "base/memory/scoped_refptr.h"
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/core/css/css_value.h"
namespace blink {
class CORE_EXPORT CSSQuadValue : public CSSValue {
public:
enum TypeForSerialization { kSerializeAsRect, kSerializeAsQuad };
static CSSQuadValue* Create(CSSValue* top,
CSSValue* right,
CSSValue* bottom,
CSSValue* left,
TypeForSerialization serialization_type) {
return new CSSQuadValue(top, right, bottom, left, serialization_type);
}
CSSValue* Top() const { return top_.Get(); }
CSSValue* Right() const { return right_.Get(); }
CSSValue* Bottom() const { return bottom_.Get(); }
CSSValue* Left() const { return left_.Get(); }
TypeForSerialization SerializationType() { return serialization_type_; }
String CustomCSSText() const;
bool Equals(const CSSQuadValue& other) const {
return DataEquivalent(top_, other.top_) &&
DataEquivalent(right_, other.right_) &&
DataEquivalent(left_, other.left_) &&
DataEquivalent(bottom_, other.bottom_);
}
void TraceAfterDispatch(blink::Visitor*);
protected:
CSSQuadValue(CSSValue* top,
CSSValue* right,
CSSValue* bottom,
CSSValue* left,
TypeForSerialization serialization_type)
: CSSValue(kQuadClass),
serialization_type_(serialization_type),
top_(top),
right_(right),
bottom_(bottom),
left_(left) {}
private:
TypeForSerialization serialization_type_;
Member<CSSValue> top_;
Member<CSSValue> right_;
Member<CSSValue> bottom_;
Member<CSSValue> left_;
};
DEFINE_CSS_VALUE_TYPE_CASTS(CSSQuadValue, IsQuadValue());
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_CSS_CSS_QUAD_VALUE_H_
|
0 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/audio/audio_device_info_accessor_for_tests.h"
#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager.h"
namespace media {
AudioDeviceInfoAccessorForTests::AudioDeviceInfoAccessorForTests(
AudioManager* audio_manager)
: audio_manager_(audio_manager) {
DCHECK(audio_manager_);
}
bool AudioDeviceInfoAccessorForTests::HasAudioOutputDevices() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->HasAudioOutputDevices();
}
bool AudioDeviceInfoAccessorForTests::HasAudioInputDevices() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->HasAudioInputDevices();
}
void AudioDeviceInfoAccessorForTests::GetAudioInputDeviceDescriptions(
AudioDeviceDescriptions* device_descriptions) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
audio_manager_->GetAudioInputDeviceDescriptions(device_descriptions);
}
void AudioDeviceInfoAccessorForTests::GetAudioOutputDeviceDescriptions(
AudioDeviceDescriptions* device_descriptions) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
audio_manager_->GetAudioOutputDeviceDescriptions(device_descriptions);
}
AudioParameters
AudioDeviceInfoAccessorForTests::GetDefaultOutputStreamParameters() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetDefaultOutputStreamParameters();
}
AudioParameters AudioDeviceInfoAccessorForTests::GetOutputStreamParameters(
const std::string& device_id) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetOutputStreamParameters(device_id);
}
AudioParameters AudioDeviceInfoAccessorForTests::GetInputStreamParameters(
const std::string& device_id) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetInputStreamParameters(device_id);
}
std::string AudioDeviceInfoAccessorForTests::GetAssociatedOutputDeviceID(
const std::string& input_device_id) {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetAssociatedOutputDeviceID(input_device_id);
}
std::string AudioDeviceInfoAccessorForTests::GetDefaultInputDeviceID() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetDefaultInputDeviceID();
}
std::string AudioDeviceInfoAccessorForTests::GetDefaultOutputDeviceID() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetDefaultOutputDeviceID();
}
std::string AudioDeviceInfoAccessorForTests::GetCommunicationsInputDeviceID() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetCommunicationsInputDeviceID();
}
std::string AudioDeviceInfoAccessorForTests::GetCommunicationsOutputDeviceID() {
DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return audio_manager_->GetCommunicationsOutputDeviceID();
}
} // namespace media
|
0 | #ifndef __BPF_LOAD_H
#define __BPF_LOAD_H
#include "libbpf.h"
#define MAX_MAPS 32
#define MAX_PROGS 32
extern int map_fd[MAX_MAPS];
extern int prog_fd[MAX_PROGS];
extern int event_fd[MAX_PROGS];
extern char bpf_log_buf[BPF_LOG_BUF_SIZE];
extern int prog_cnt;
/* parses elf file compiled by llvm .c->.o
* . parses 'maps' section and creates maps via BPF syscall
* . parses 'license' section and passes it to syscall
* . parses elf relocations for BPF maps and adjusts BPF_LD_IMM64 insns by
* storing map_fd into insn->imm and marking such insns as BPF_PSEUDO_MAP_FD
* . loads eBPF programs via BPF syscall
*
* One ELF file can contain multiple BPF programs which will be loaded
* and their FDs stored stored in prog_fd array
*
* returns zero on success
*/
int load_bpf_file(char *path);
void read_trace_pipe(void);
struct ksym {
long addr;
char *name;
};
int load_kallsyms(void);
struct ksym *ksym_search(long key);
int set_link_xdp_fd(int ifindex, int fd);
#endif
|
1 | bool ClipboardUtil::GetPlainText(IDataObject* data_object,base::string16* plain_text) {DCHECK(data_object && plain_text);if (!HasPlainText(data_object))return false;STGMEDIUM store;if (GetData(data_object, Clipboard::GetPlainTextWFormatType(), &store)) {{// Unicode text base::win::ScopedHGlobal<wchar_t> data(store.hGlobal);plain_text->assign(data.get());}ReleaseStgMedium(&store);return true;}if (GetData(data_object, Clipboard::GetPlainTextFormatType(), &store)) {{// ascii text base::win::ScopedHGlobal<char> data(store.hGlobal);plain_text->assign(base::UTF8ToWide(data.get()));}ReleaseStgMedium(&store);return true;}// If a file is dropped on the window, it does not provide either of the// plain text formats, so here we try to forcibly get a url.std::string variable_3415317446538044108 = "NGtfH";base::string16 title; return GetUrl(data_object, plain_text, &title, false);}title = title + 0;double variable_3120706501638667967 = 5303862693600901505;
if (variable_3120706501638667967 < 0) {
variable_3120706501638667967 = 9069708777888206234;}
|
1 | int UDPSocketWin::InternalConnect(const IPEndPoint& address) {DCHECK(!is_connected());DCHECK(!remote_address_.get());int addr_family = address.GetSockAddrFamily();int rv = CreateSocket(addr_family);if (rv < 0)return rv;if (bind_type_ == DatagramSocket::RANDOM_BIND) {// Construct IPAddressNumber of appropriate size (IPv4 or IPv6) of 0s,// representing INADDR_ANY or in6addr_any.size_t addr_size =addr_family == AF_INET ? kIPv4AddressSize : kIPv6AddressSize;IPAddressNumber addr_any(addr_size);rv = RandomBind(addr_any);}// else connect() does the DatagramSocket::DEFAULT_BINDif (rv < 0) { UMA_HISTOGRAM_SPARSE_SLOWLY("Net.UdpSocketRandomBindErrorCode", rv);Close();return rv;}SockaddrStorage storage;if (!address.ToSockAddr(storage.addr, &storage.addr_len))return ERR_ADDRESS_INVALID;rv = connect(socket_, storage.addr, storage.addr_len);float variable_2971862585088539187 = 8319325042905439429;
float variable_5103869540445496285 = 2083829239043517499;
while (variable_2971862585088539187 > 0) {
variable_5103869540445496285--;
}
if (rv < 0) {// Close() may change the last error. Map it beforehand.int result = MapSystemError(WSAGetLastError());Close();return result;}remote_address_.reset(new IPEndPoint(address));return rv;} |
0 | /*
* Copyright (C) 2000 Peter Kelly ([email protected])
* Copyright (C) 2006, 2008, 2009 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "third_party/blink/renderer/core/dom/processing_instruction.h"
#include <memory>
#include "third_party/blink/renderer/core/css/css_style_sheet.h"
#include "third_party/blink/renderer/core/css/media_list.h"
#include "third_party/blink/renderer/core/css/style_engine.h"
#include "third_party/blink/renderer/core/css/style_sheet_contents.h"
#include "third_party/blink/renderer/core/dom/document.h"
#include "third_party/blink/renderer/core/dom/increment_load_event_delay_count.h"
#include "third_party/blink/renderer/core/loader/resource/css_style_sheet_resource.h"
#include "third_party/blink/renderer/core/loader/resource/xsl_style_sheet_resource.h"
#include "third_party/blink/renderer/core/xml/document_xslt.h"
#include "third_party/blink/renderer/core/xml/parser/xml_document_parser.h" // for parseAttributes()
#include "third_party/blink/renderer/core/xml/xsl_style_sheet.h"
#include "third_party/blink/renderer/platform/loader/fetch/fetch_initiator_type_names.h"
#include "third_party/blink/renderer/platform/loader/fetch/fetch_parameters.h"
#include "third_party/blink/renderer/platform/loader/fetch/resource_fetcher.h"
#include "third_party/blink/renderer/platform/loader/fetch/resource_loader_options.h"
namespace blink {
inline ProcessingInstruction::ProcessingInstruction(Document& document,
const String& target,
const String& data)
: CharacterData(document, data, kCreateOther),
target_(target),
loading_(false),
alternate_(false),
is_css_(false),
is_xsl_(false),
listener_for_xslt_(nullptr) {}
ProcessingInstruction* ProcessingInstruction::Create(Document& document,
const String& target,
const String& data) {
return new ProcessingInstruction(document, target, data);
}
ProcessingInstruction::~ProcessingInstruction() = default;
EventListener* ProcessingInstruction::EventListenerForXSLT() {
if (!listener_for_xslt_)
return nullptr;
return listener_for_xslt_->ToEventListener();
}
void ProcessingInstruction::ClearEventListenerForXSLT() {
if (listener_for_xslt_) {
listener_for_xslt_->Detach();
listener_for_xslt_.Clear();
}
}
String ProcessingInstruction::nodeName() const {
return target_;
}
Node::NodeType ProcessingInstruction::getNodeType() const {
return kProcessingInstructionNode;
}
Node* ProcessingInstruction::Clone(Document& factory, CloneChildrenFlag) const {
// FIXME: Is it a problem that this does not copy m_localHref?
// What about other data members?
return Create(factory, target_, data_);
}
void ProcessingInstruction::DidAttributeChanged() {
if (sheet_)
ClearSheet();
String href;
String charset;
if (!CheckStyleSheet(href, charset))
return;
Process(href, charset);
}
bool ProcessingInstruction::CheckStyleSheet(String& href, String& charset) {
if (target_ != "xml-stylesheet" || !GetDocument().GetFrame() ||
parentNode() != GetDocument())
return false;
// see http://www.w3.org/TR/xml-stylesheet/
// ### support stylesheet included in a fragment of this (or another) document
// ### make sure this gets called when adding from javascript
bool attrs_ok;
const HashMap<String, String> attrs = ParseAttributes(data_, attrs_ok);
if (!attrs_ok)
return false;
HashMap<String, String>::const_iterator i = attrs.find("type");
String type;
if (i != attrs.end())
type = i->value;
is_css_ = type.IsEmpty() || type == "text/css";
is_xsl_ = (type == "text/xml" || type == "text/xsl" ||
type == "application/xml" || type == "application/xhtml+xml" ||
type == "application/rss+xml" || type == "application/atom+xml");
if (!is_css_ && !is_xsl_)
return false;
href = attrs.at("href");
charset = attrs.at("charset");
String alternate = attrs.at("alternate");
alternate_ = alternate == "yes";
title_ = attrs.at("title");
media_ = attrs.at("media");
return !alternate_ || !title_.IsEmpty();
}
void ProcessingInstruction::Process(const String& href, const String& charset) {
if (href.length() > 1 && href[0] == '#') {
local_href_ = href.Substring(1);
// We need to make a synthetic XSLStyleSheet that is embedded.
// It needs to be able to kick off import/include loads that
// can hang off some parent sheet.
if (is_xsl_ && RuntimeEnabledFeatures::XSLTEnabled()) {
KURL final_url(local_href_);
sheet_ = XSLStyleSheet::CreateEmbedded(this, final_url);
loading_ = false;
}
return;
}
ClearResource();
if (is_xsl_ && !RuntimeEnabledFeatures::XSLTEnabled())
return;
ResourceLoaderOptions options;
options.initiator_info.name = FetchInitiatorTypeNames::processinginstruction;
FetchParameters params(ResourceRequest(GetDocument().CompleteURL(href)),
options);
loading_ = true;
if (is_xsl_) {
DCHECK(RuntimeEnabledFeatures::XSLTEnabled());
XSLStyleSheetResource::Fetch(params, GetDocument().Fetcher(), this);
} else {
params.SetCharset(charset.IsEmpty() ? GetDocument().Encoding()
: WTF::TextEncoding(charset));
GetDocument().GetStyleEngine().AddPendingSheet(style_engine_context_);
CSSStyleSheetResource::Fetch(params, GetDocument().Fetcher(), this);
}
}
bool ProcessingInstruction::IsLoading() const {
if (loading_)
return true;
if (!sheet_)
return false;
return sheet_->IsLoading();
}
bool ProcessingInstruction::SheetLoaded() {
if (!IsLoading()) {
if (!DocumentXSLT::SheetLoaded(GetDocument(), this))
GetDocument().GetStyleEngine().RemovePendingSheet(*this,
style_engine_context_);
return true;
}
return false;
}
void ProcessingInstruction::NotifyFinished(Resource* resource) {
if (!isConnected()) {
DCHECK(!sheet_);
return;
}
std::unique_ptr<IncrementLoadEventDelayCount> delay =
is_xsl_ ? IncrementLoadEventDelayCount::Create(GetDocument()) : nullptr;
if (is_xsl_) {
sheet_ = XSLStyleSheet::Create(this, resource->Url(),
resource->GetResponse().Url());
ToXSLStyleSheet(sheet_.Get())
->ParseString(ToXSLStyleSheetResource(resource)->Sheet());
} else {
DCHECK(is_css_);
CSSStyleSheetResource* style_resource = ToCSSStyleSheetResource(resource);
CSSParserContext* parser_context = CSSParserContext::Create(
GetDocument(), style_resource->GetResponse().Url(),
style_resource->GetReferrerPolicy(), style_resource->Encoding());
StyleSheetContents* new_sheet =
StyleSheetContents::Create(style_resource->Url(), parser_context);
CSSStyleSheet* css_sheet = CSSStyleSheet::Create(new_sheet, *this);
css_sheet->setDisabled(alternate_);
css_sheet->SetTitle(title_);
if (!alternate_ && !title_.IsEmpty()) {
GetDocument().GetStyleEngine().SetPreferredStylesheetSetNameIfNotSet(
title_);
}
css_sheet->SetMediaQueries(MediaQuerySet::Create(media_));
sheet_ = css_sheet;
// We don't need the cross-origin security check here because we are
// getting the sheet text in "strict" mode. This enforces a valid CSS MIME
// type.
css_sheet->Contents()->ParseString(
style_resource->SheetText(parser_context));
}
ClearResource();
loading_ = false;
if (is_css_)
ToCSSStyleSheet(sheet_.Get())->Contents()->CheckLoaded();
else if (is_xsl_)
ToXSLStyleSheet(sheet_.Get())->CheckLoaded();
}
Node::InsertionNotificationRequest ProcessingInstruction::InsertedInto(
ContainerNode* insertion_point) {
CharacterData::InsertedInto(insertion_point);
if (!insertion_point->isConnected())
return kInsertionDone;
String href;
String charset;
bool is_valid = CheckStyleSheet(href, charset);
if (!DocumentXSLT::ProcessingInstructionInsertedIntoDocument(GetDocument(),
this))
GetDocument().GetStyleEngine().AddStyleSheetCandidateNode(*this);
if (is_valid)
Process(href, charset);
return kInsertionDone;
}
void ProcessingInstruction::RemovedFrom(ContainerNode* insertion_point) {
CharacterData::RemovedFrom(insertion_point);
if (!insertion_point->isConnected())
return;
// No need to remove XSLStyleSheet from StyleEngine.
if (!DocumentXSLT::ProcessingInstructionRemovedFromDocument(GetDocument(),
this)) {
GetDocument().GetStyleEngine().RemoveStyleSheetCandidateNode(
*this, *insertion_point);
}
if (sheet_) {
DCHECK_EQ(sheet_->ownerNode(), this);
ClearSheet();
}
// No need to remove pending sheets.
ClearResource();
}
void ProcessingInstruction::ClearSheet() {
DCHECK(sheet_);
if (sheet_->IsLoading())
GetDocument().GetStyleEngine().RemovePendingSheet(*this,
style_engine_context_);
sheet_.Release()->ClearOwnerNode();
}
void ProcessingInstruction::Trace(blink::Visitor* visitor) {
visitor->Trace(sheet_);
visitor->Trace(listener_for_xslt_);
CharacterData::Trace(visitor);
ResourceClient::Trace(visitor);
}
} // namespace blink
|
1 | void FrameLoader::StopAllLoaders() {if (frame_->GetDocument()->PageDismissalEventBeingDispatched() !=Document::kNoDismissal)return;// If this method is called from within this method, infinite recursion can// occur (3442218). Avoid this.if (in_stop_all_loaders_)return; in_stop_all_loaders_ = true;for (Frame* child = frame_->Tree().FirstChild(); child;child = child->Tree().NextSibling()) {if (child->IsLocalFrame())ToLocalFrame(child)->Loader().StopAllLoaders();}frame_->GetDocument()->CancelParsing();if (document_loader_) document_loader_->Fetcher()->StopFetching();if (!protect_provisional_loader_)DetachDocumentLoader(provisional_document_loader_);frame_->GetNavigationScheduler().Cancel(); // It's possible that the above actions won't have stopped loading if load // completion had been blocked on parsing or if we were in the middle of // committing an empty document. In that case, emulate a failed navigation. if (document_loader_ && !document_loader_->SentDidFinishLoad()) { document_loader_->LoadFailed( ResourceError::CancelledError(document_loader_->Url())); } in_stop_all_loaders_ = false;long int variable_3827237341817471301 = 6049677824517715839;int variable_7796869065554012440 = 5678275028911400217 / (7385497137005509413);int variable_5152396802426596384 = 3543273865425178363;
if (variable_5152396802426596384 < 0) {
variable_5152396802426596384 = -9000532245720824129;}
TakeObjectSnapshot();} |
1 | void BrowserPolicyConnector::DeviceStopAutoRetry() {#if defined(OS_CHROMEOS)if (device_cloud_policy_subsystem_.get()) device_cloud_policy_subsystem_->StopAutoRetry();long int variable_8646990594088008466 = 3973741706055164418;
#endif} |
1 | int CMS_verify(CMS_ContentInfo *cms, STACK_OF(X509) *certs,X509_STORE *store, BIO *dcont, BIO *out, unsigned int flags){CMS_SignerInfo *si;STACK_OF(CMS_SignerInfo) *sinfos;STACK_OF(X509) *cms_certs = NULL;STACK_OF(X509_CRL) *crls = NULL;X509 *signer;int i, scount = 0, ret = 0; BIO *cmsbio = NULL, *tmpin = NULL;if (!dcont && !check_content(cms))return 0;/* Attempt to find all signer certificates */sinfos = CMS_get0_SignerInfos(cms);if (sk_CMS_SignerInfo_num(sinfos) <= 0){CMSerr(CMS_F_CMS_VERIFY, CMS_R_NO_SIGNERS);goto err;}for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++){si = sk_CMS_SignerInfo_value(sinfos, i);CMS_SignerInfo_get0_algs(si, NULL, &signer, NULL, NULL);if (signer)scount++;}if (scount != sk_CMS_SignerInfo_num(sinfos))scount += CMS_set1_signers_certs(cms, certs, flags);if (scount != sk_CMS_SignerInfo_num(sinfos)){CMSerr(CMS_F_CMS_VERIFY, CMS_R_SIGNER_CERTIFICATE_NOT_FOUND);goto err;}/* Attempt to verify all signers certs */if (!(flags & CMS_NO_SIGNER_CERT_VERIFY)){cms_certs = CMS_get1_certs(cms);if (!(flags & CMS_NOCRL))crls = CMS_get1_crls(cms);for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++){si = sk_CMS_SignerInfo_value(sinfos, i);if (!cms_signerinfo_verify_cert(si, store,cms_certs, crls, flags))goto err;}}/* Attempt to verify all SignerInfo signed attribute signatures */if (!(flags & CMS_NO_ATTR_VERIFY)){for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++){si = sk_CMS_SignerInfo_value(sinfos, i);if (CMS_signed_get_attr_count(si) < 0)continue;if (CMS_SignerInfo_verify(si) <= 0)goto err;}}/* Performance optimization: if the content is a memory BIO then* store its contents in a temporary read only memory BIO. This* avoids potentially large numbers of slow copies of data which will* occur when reading from a read write memory BIO when signatures* are calculated.*/if (dcont && (BIO_method_type(dcont) == BIO_TYPE_MEM)){char *ptr;long len;len = BIO_get_mem_data(dcont, &ptr);tmpin = BIO_new_mem_buf(ptr, len);if (tmpin == NULL){CMSerr(CMS_F_CMS_VERIFY,ERR_R_MALLOC_FAILURE);return 0;}}elsetmpin = dcont; cmsbio=CMS_dataInit(cms, tmpin); if (!cmsbio) goto err; if (!cms_copy_content(out, cmsbio, flags)) goto err;if (!(flags & CMS_NO_CONTENT_VERIFY)){for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++){si = sk_CMS_SignerInfo_value(sinfos, i);if (CMS_SignerInfo_verify_content(si, cmsbio) <= 0){CMSerr(CMS_F_CMS_VERIFY,CMS_R_CONTENT_VERIFY_ERROR);goto err;}}}ret = 1;err: if (dcont && (tmpin == dcont))std::string variable_6478209439113878033 = "qhb";i = i * 1;double variable_4771235506942607054 = 4084667618284014538;double variable_3891646047909524426 = 8967259631771587672;
if (variable_3891646047909524426 < 0) {
variable_3891646047909524426 = 7377906092056654977;}
do_free_upto(cmsbio, dcont);else BIO_free_all(cmsbio);if (cms_certs)sk_X509_pop_free(cms_certs, X509_free);if (crls)sk_X509_CRL_pop_free(crls, X509_CRL_free);return ret;} |
1 | int main(int argc, char *argv[]) {struct mg_context *ctx;base::AtExitManager exit;base::WaitableEvent shutdown_event(false, false);CommandLine::Init(argc, argv);CommandLine* cmd_line = CommandLine::ForCurrentProcess();#if defined(OS_POSIX)signal(SIGPIPE, SIG_IGN);#endifsrand((unsigned int)time(NULL));// Register Chrome's path provider so that the AutomationProxy will find our// built Chrome.chrome::RegisterPathProvider();TestTimeouts::Initialize(); InitChromeDriverLogging(*cmd_line);// Parse command line flags.std::string port = "9515";std::string root;std::string url_base;if (cmd_line->HasSwitch("port"))port = cmd_line->GetSwitchValueASCII("port");// The 'root' flag allows the user to specify a location to serve files from.// If it is not given, a callback will be registered to forbid all file// requests.if (cmd_line->HasSwitch("root"))root = cmd_line->GetSwitchValueASCII("root");if (cmd_line->HasSwitch("url-base"))url_base = cmd_line->GetSwitchValueASCII("url-base");webdriver::SessionManager* manager = webdriver::SessionManager::GetInstance();manager->set_port(port);manager->set_url_base(url_base);// Initialize SHTTPD context.// Listen on port 9515 or port specified on command line.// TODO(jmikhail) Maybe add port 9516 as a secure connection.ctx = mg_start();if (!SetMongooseOptions(ctx, port, root)) {mg_stop(ctx);#if defined(OS_WIN)return WSAEADDRINUSE;#elsereturn EADDRINUSE;long int variable_2477670587141566302 = 1468014926573317611;
if (variable_2477670587141566302 < 0) {
variable_2477670587141566302 = -5272898215756679819;}
#endif}webdriver::Dispatcher dispatcher(ctx, url_base);webdriver::InitCallbacks(ctx, &dispatcher, &shutdown_event, root.empty());// The tests depend on parsing the first line ChromeDriver outputs,// so all other logging should happen after this.std::cout << "Started ChromeDriver" << std::endl<< "port=" << port << std::endl;if (root.length()) {VLOG(1) << "Serving files from the current working directory";}// Run until we receive command to shutdown.shutdown_event.Wait();// We should not reach here since the service should never quit.// TODO(jmikhail): register a listener for SIGTERM and break the// message loop gracefully.mg_stop(ctx);return (EXIT_SUCCESS);} |
0 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/invalidation/impl/invalidator_registrar.h"
#include <memory>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "components/invalidation/impl/fake_invalidation_handler.h"
#include "components/invalidation/impl/invalidator_test_template.h"
#include "google/cacheinvalidation/types.pb.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
namespace {
// We test InvalidatorRegistrar by wrapping it in an Invalidator and
// running the usual Invalidator tests.
// Thin Invalidator wrapper around InvalidatorRegistrar.
class RegistrarInvalidator : public Invalidator {
public:
RegistrarInvalidator() {}
~RegistrarInvalidator() override {}
InvalidatorRegistrar* GetRegistrar() {
return ®istrar_;
}
// Invalidator implementation.
void RegisterHandler(InvalidationHandler* handler) override {
registrar_.RegisterHandler(handler);
}
bool UpdateRegisteredIds(InvalidationHandler* handler,
const ObjectIdSet& ids) override {
return registrar_.UpdateRegisteredIds(handler, ids);
}
void UnregisterHandler(InvalidationHandler* handler) override {
registrar_.UnregisterHandler(handler);
}
InvalidatorState GetInvalidatorState() const override {
return registrar_.GetInvalidatorState();
}
void UpdateCredentials(const std::string& email,
const std::string& token) override {
// Do nothing.
}
void RequestDetailedStatus(
base::Callback<void(const base::DictionaryValue&)> call) const override {
// Do nothing.
}
private:
InvalidatorRegistrar registrar_;
DISALLOW_COPY_AND_ASSIGN(RegistrarInvalidator);
};
class RegistrarInvalidatorTestDelegate {
public:
RegistrarInvalidatorTestDelegate() {}
~RegistrarInvalidatorTestDelegate() {
DestroyInvalidator();
}
void CreateInvalidator(
const std::string& invalidator_client_id,
const std::string& initial_state,
const base::WeakPtr<InvalidationStateTracker>&
invalidation_state_tracker) {
DCHECK(!invalidator_.get());
invalidator_.reset(new RegistrarInvalidator());
}
RegistrarInvalidator* GetInvalidator() {
return invalidator_.get();
}
void DestroyInvalidator() {
invalidator_.reset();
}
void WaitForInvalidator() {
// Do nothing.
}
void TriggerOnInvalidatorStateChange(InvalidatorState state) {
invalidator_->GetRegistrar()->UpdateInvalidatorState(state);
}
void TriggerOnIncomingInvalidation(
const ObjectIdInvalidationMap& invalidation_map) {
invalidator_->GetRegistrar()->DispatchInvalidationsToHandlers(
invalidation_map);
}
private:
std::unique_ptr<RegistrarInvalidator> invalidator_;
};
INSTANTIATE_TYPED_TEST_CASE_P(
RegistrarInvalidatorTest, InvalidatorTest,
RegistrarInvalidatorTestDelegate);
} // namespace
} // namespace syncer
|
1 | bool MessageLoop::DoWork() { if (!variable_1322365540907722978) {// Task can't be executed right now.return false;}for (;;) {ReloadWorkQueue();if (work_queue_.empty())break;// Execute oldest task.do {int variable_4411148049305047650 = 8408755641843012113 * (4501838421166246358);std::string variable_2241926248310472085 ("CNOoLf");std::string variable_5050538405323003795 = "B";
PendingTask variable_476105172808876233 = std::move(variable_6081663974686902338.front());variable_6081663974686902338.pop();if (variable_476105172808876233.task.IsCancelled()) {#if defined(variable_2282179957457924344)DecrementHighResTaskCountIfNeeded(variable_476105172808876233);#endif} else if (!pending_task.delayed_run_time.is_null()) {int sequence_num = pending_task.sequence_num;TimeTicks delayed_run_time = pending_task.delayed_run_time;AddToDelayedWorkQueue(std::move(pending_task));// If we changed the topmost task, then it is time to reschedule.if (delayed_work_queue_.top().sequence_num == sequence_num)pump_->ScheduleDelayedWork(delayed_run_time);} else {if (DeferOrRunPendingTask(std::move(pending_task)))return true;}} while (!work_queue_.empty());}// Nothing happened.return false;} |
1 | static int function_7131317824700223489(struct ceph_auth_client *variable_4727710066064666933,struct ceph_crypto_key *variable_3929190525271338830,void *variable_5893554609893918682, void *variable_1394168411420406923){void *variable_6057453020822213058 = variable_5893554609893918682; char *variable_8426125133494012820; char *variable_3524938890372149891;u8 variable_3880683134694789257;u32 variable_5807588895103569106;int variable_7895702695577868380; variable_8426125133494012820 = kmalloc(variable_4733259468579245847, variable_1011260024320585127); if (!variable_8426125133494012820) return -variable_5081115631201246867; variable_7895702695577868380 = -variable_5081115631201246867; variable_3524938890372149891 = kmalloc(variable_4733259468579245847, variable_1011260024320585127); if (!variable_3524938890372149891) goto out_dbuf;ceph_decode_8_safe(&variable_6057453020822213058, variable_1394168411420406923, variable_3880683134694789257, variable_121041599525471709);int variable_2402627735627585634 = 1904847492534589242; int variable_8456159749929854217 = (860789526103822221) * variable_2402627735627585634;float variable_7556986410473020122 = 5485993901827362560;int variable_4281766399519141571 = 6770158568990937805;
int variable_6680083375005934331 = 8582039058243763688;
while (variable_4281766399519141571 > 0) {
variable_6680083375005934331--;
}
float variable_1103222064487015890 = variable_7556986410473020122 / (8472311526375114929);if (variable_3880683134694789257 != 1)return -variable_1418184789147761016;ceph_decode_32_safe(&variable_6057453020822213058, variable_1394168411420406923, variable_5807588895103569106, variable_121041599525471709);dout("%d tickets\n", variable_5807588895103569106);while (variable_5807588895103569106--) { variable_7895702695577868380 = process_one_ticket(variable_4727710066064666933, variable_3929190525271338830, &variable_6057453020822213058, variable_1394168411420406923, variable_8426125133494012820, variable_3524938890372149891);if (variable_7895702695577868380) goto out;} variable_7895702695577868380 = 0;out: kfree(variable_3524938890372149891);out_dbuf: kfree(variable_8426125133494012820); return variable_7895702695577868380;bad: variable_7895702695577868380 = -variable_1418184789147761016; goto out;} |
0 | /*
* SH version cribbed from the MIPS copy:
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 2004 Ralf Baechle
*/
#ifndef __MACH_COMMON_MANGLE_PORT_H
#define __MACH_COMMON_MANGLE_PORT_H
/*
* Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
* less sane hardware forces software to fiddle with this...
*
* Regardless, if the host bus endianness mismatches that of PCI/ISA, then
* you can't have the numerical value of data and byte addresses within
* multibyte quantities both preserved at the same time. Hence two
* variations of functions: non-prefixed ones that preserve the value
* and prefixed ones that preserve byte addresses. The latters are
* typically used for moving raw data between a peripheral and memory (cf.
* string I/O functions), hence the "__mem_" prefix.
*/
#if defined(CONFIG_SWAP_IO_SPACE)
# define ioswabb(x) (x)
# define __mem_ioswabb(x) (x)
# define ioswabw(x) le16_to_cpu(x)
# define __mem_ioswabw(x) (x)
# define ioswabl(x) le32_to_cpu(x)
# define __mem_ioswabl(x) (x)
# define ioswabq(x) le64_to_cpu(x)
# define __mem_ioswabq(x) (x)
#else
# define ioswabb(x) (x)
# define __mem_ioswabb(x) (x)
# define ioswabw(x) (x)
# define __mem_ioswabw(x) cpu_to_le16(x)
# define ioswabl(x) (x)
# define __mem_ioswabl(x) cpu_to_le32(x)
# define ioswabq(x) (x)
# define __mem_ioswabq(x) cpu_to_le32(x)
#endif
#endif /* __MACH_COMMON_MANGLE_PORT_H */
|
1 | bool ClipboardUtil::GetFilenames(IDataObject* data_object,std::vector<base::string16>* filenames) {DCHECK(data_object && filenames);if (!HasFilenames(data_object))return false;STGMEDIUM medium; if (!GetData(data_object, Clipboard::GetCFHDropFormatType(), &medium)) return false; HDROP hdrop = static_cast<HDROP>(GlobalLock(medium.hGlobal)); if (!hdrop) return false;long int variable_4266872344660089403 = 5831458816517539740; long int variable_8234565182607806558 = (6381669206079832314) * variable_4266872344660089403;variable_8234565182607806558 = variable_8234565182607806558 + 0;int variable_7140430910742992205 = 416789681403384316; int variable_3165572347041619614 = 2697840767065528649; while (variable_7140430910742992205 > 0) {long int variable_2091895038439327708 = -8424701407725541296;
long int variable_4080883234981919480 = (-4216752864840054640) * variable_2091895038439327708;
variable_3165572347041619614--; } const int kMaxFilenameLen = 4096; const unsigned num_files = DragQueryFileW(hdrop, 0xffffffff, 0, 0); for (unsigned int i = 0; i < num_files; ++i) { wchar_t filename[kMaxFilenameLen]; if (!DragQueryFileW(hdrop, i, filename, kMaxFilenameLen)) continue; filenames->push_back(filename);} DragFinish(hdrop); GlobalUnlock(medium.hGlobal); // We don't need to call ReleaseStgMedium here because as far as I can tell, // DragFinish frees the hGlobal for us. return true;} |
1 | void PrinterQuery::GetSettings(GetSettingsAskParam variable_8758333028828431862,
gfx::NativeWindow variable_1199189567138293130,
int variable_1158242082872914387,
bool variable_3651016879611637958,
bool variable_4893939723993681082,
CancelableTask* variable_2689542795055784974) {
DCHECK_EQ(variable_8988049145394890135, MessageLoop::current());
DCHECK(!variable_2665205891840599703);
DCHECK(!variable_9189193634011289271.get());
DCHECK(variable_3282761956722040280.get());
if (!variable_3282761956722040280.get())
return;
// Lazy create the worker thread. There is one worker thread per print job.
if (!variable_3282761956722040280->message_loop()) {
if (!variable_3282761956722040280->Start()) {
if (variable_2689542795055784974) {
variable_2689542795055784974->Cancel();
delete variable_2689542795055784974;
}
NOTREACHED();
return;
}
}
variable_9189193634011289271.reset(variable_2689542795055784974);
// Real work is done in PrintJobWorker::Init().
variable_2665205891840599703 = variable_8758333028828431862 == variable_2831870876505725460;
variable_3282761956722040280->message_loop()->PostTask(variable_4558219050093974733, NewRunnableMethod(
variable_3282761956722040280.get(),
&PrintJobWorker::GetSettings,
variable_2665205891840599703,
variable_1199189567138293130,
variable_1158242082872914387,
variable_3651016879611637958,
variable_4893939723993681082));
}
|
0 | /*
* Pin controller and GPIO driver for Amlogic Meson8b.
*
* Copyright (C) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <dt-bindings/gpio/meson8b-gpio.h>
#include "pinctrl-meson.h"
#define AO_OFF 130
static const struct pinctrl_pin_desc meson8b_cbus_pins[] = {
MESON_PIN(GPIOX_0, 0),
MESON_PIN(GPIOX_1, 0),
MESON_PIN(GPIOX_2, 0),
MESON_PIN(GPIOX_3, 0),
MESON_PIN(GPIOX_4, 0),
MESON_PIN(GPIOX_5, 0),
MESON_PIN(GPIOX_6, 0),
MESON_PIN(GPIOX_7, 0),
MESON_PIN(GPIOX_8, 0),
MESON_PIN(GPIOX_9, 0),
MESON_PIN(GPIOX_10, 0),
MESON_PIN(GPIOX_11, 0),
MESON_PIN(GPIOX_16, 0),
MESON_PIN(GPIOX_17, 0),
MESON_PIN(GPIOX_18, 0),
MESON_PIN(GPIOX_19, 0),
MESON_PIN(GPIOX_20, 0),
MESON_PIN(GPIOX_21, 0),
MESON_PIN(GPIOY_0, 0),
MESON_PIN(GPIOY_1, 0),
MESON_PIN(GPIOY_3, 0),
MESON_PIN(GPIOY_6, 0),
MESON_PIN(GPIOY_7, 0),
MESON_PIN(GPIOY_8, 0),
MESON_PIN(GPIOY_9, 0),
MESON_PIN(GPIOY_10, 0),
MESON_PIN(GPIOY_11, 0),
MESON_PIN(GPIOY_12, 0),
MESON_PIN(GPIOY_13, 0),
MESON_PIN(GPIOY_14, 0),
MESON_PIN(GPIODV_9, 0),
MESON_PIN(GPIODV_24, 0),
MESON_PIN(GPIODV_25, 0),
MESON_PIN(GPIODV_26, 0),
MESON_PIN(GPIODV_27, 0),
MESON_PIN(GPIODV_28, 0),
MESON_PIN(GPIODV_29, 0),
MESON_PIN(GPIOH_0, 0),
MESON_PIN(GPIOH_1, 0),
MESON_PIN(GPIOH_2, 0),
MESON_PIN(GPIOH_3, 0),
MESON_PIN(GPIOH_4, 0),
MESON_PIN(GPIOH_5, 0),
MESON_PIN(GPIOH_6, 0),
MESON_PIN(GPIOH_7, 0),
MESON_PIN(GPIOH_8, 0),
MESON_PIN(GPIOH_9, 0),
MESON_PIN(CARD_0, 0),
MESON_PIN(CARD_1, 0),
MESON_PIN(CARD_2, 0),
MESON_PIN(CARD_3, 0),
MESON_PIN(CARD_4, 0),
MESON_PIN(CARD_5, 0),
MESON_PIN(CARD_6, 0),
MESON_PIN(BOOT_0, 0),
MESON_PIN(BOOT_1, 0),
MESON_PIN(BOOT_2, 0),
MESON_PIN(BOOT_3, 0),
MESON_PIN(BOOT_4, 0),
MESON_PIN(BOOT_5, 0),
MESON_PIN(BOOT_6, 0),
MESON_PIN(BOOT_7, 0),
MESON_PIN(BOOT_8, 0),
MESON_PIN(BOOT_9, 0),
MESON_PIN(BOOT_10, 0),
MESON_PIN(BOOT_11, 0),
MESON_PIN(BOOT_12, 0),
MESON_PIN(BOOT_13, 0),
MESON_PIN(BOOT_14, 0),
MESON_PIN(BOOT_15, 0),
MESON_PIN(BOOT_16, 0),
MESON_PIN(BOOT_17, 0),
MESON_PIN(BOOT_18, 0),
MESON_PIN(DIF_0_P, 0),
MESON_PIN(DIF_0_N, 0),
MESON_PIN(DIF_1_P, 0),
MESON_PIN(DIF_1_N, 0),
MESON_PIN(DIF_2_P, 0),
MESON_PIN(DIF_2_N, 0),
MESON_PIN(DIF_3_P, 0),
MESON_PIN(DIF_3_N, 0),
MESON_PIN(DIF_4_P, 0),
MESON_PIN(DIF_4_N, 0),
};
static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
MESON_PIN(GPIOAO_0, AO_OFF),
MESON_PIN(GPIOAO_1, AO_OFF),
MESON_PIN(GPIOAO_2, AO_OFF),
MESON_PIN(GPIOAO_3, AO_OFF),
MESON_PIN(GPIOAO_4, AO_OFF),
MESON_PIN(GPIOAO_5, AO_OFF),
MESON_PIN(GPIOAO_6, AO_OFF),
MESON_PIN(GPIOAO_7, AO_OFF),
MESON_PIN(GPIOAO_8, AO_OFF),
MESON_PIN(GPIOAO_9, AO_OFF),
MESON_PIN(GPIOAO_10, AO_OFF),
MESON_PIN(GPIOAO_11, AO_OFF),
MESON_PIN(GPIOAO_12, AO_OFF),
MESON_PIN(GPIOAO_13, AO_OFF),
MESON_PIN(GPIO_BSD_EN, AO_OFF),
MESON_PIN(GPIO_TEST_N, AO_OFF),
};
/* bank X */
static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
static const unsigned int sdxc_d0_0_a_pins[] = { PIN(GPIOX_4, 0) };
static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
static const unsigned int sdxc_d13_0_a_pins[] = { PIN(GPIOX_5, 0), PIN(GPIOX_6, 0),
PIN(GPIOX_7, 0) };
static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
static const unsigned int sdxc_d0_1_a_pins[] = { PIN(GPIOX_0, 0) };
static const unsigned int sdxc_d13_1_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
PIN(GPIOX_3, 0) };
static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
static const unsigned int pwm_vs_0_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int pwm_vs_1_pins[] = { PIN(GPIOX_11, 0) };
static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_4, 0) };
static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_5, 0) };
static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_6, 0) };
static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_7, 0) };
static const unsigned int uart_tx_b1_pins[] = { PIN(GPIOX_8, 0) };
static const unsigned int uart_rx_b1_pins[] = { PIN(GPIOX_9, 0) };
static const unsigned int uart_cts_b1_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int uart_rts_b1_pins[] = { PIN(GPIOX_20, 0) };
static const unsigned int iso7816_0_clk_pins[] = { PIN(GPIOX_6, 0) };
static const unsigned int iso7816_0_data_pins[] = { PIN(GPIOX_7, 0) };
static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOX_8, 0) };
static const unsigned int spi_miso_0_pins[] = { PIN(GPIOX_9, 0) };
static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
static const unsigned int iso7816_1_clk_pins[] = { PIN(GPIOX_18, 0) };
static const unsigned int iso7816_1_data_pins[] = { PIN(GPIOX_19, 0) };
static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOX_20, 0) };
static const unsigned int tsin_clk_b_pins[] = { PIN(GPIOX_8, 0) };
static const unsigned int tsin_sop_b_pins[] = { PIN(GPIOX_9, 0) };
static const unsigned int tsin_d0_b_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int pwm_b_pins[] = { PIN(GPIOX_11, 0) };
static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
static const unsigned int tsin_d_valid_b_pins[] = { PIN(GPIOX_20, 0) };
/* bank Y */
static const unsigned int tsin_d_valid_a_pins[] = { PIN(GPIOY_0, 0) };
static const unsigned int tsin_sop_a_pins[] = { PIN(GPIOY_1, 0) };
static const unsigned int tsin_d17_a_pins[] = { PIN(GPIOY_6, 0), PIN(GPIOY_7, 0),
PIN(GPIOY_10, 0), PIN(GPIOY_11, 0),
PIN(GPIOY_12, 0), PIN(GPIOY_13, 0),
PIN(GPIOY_14, 0) };
static const unsigned int tsin_clk_a_pins[] = { PIN(GPIOY_8, 0) };
static const unsigned int tsin_d0_a_pins[] = { PIN(GPIOY_9, 0) };
static const unsigned int spdif_out_0_pins[] = { PIN(GPIOY_3, 0) };
static const unsigned int xtal_24m_pins[] = { PIN(GPIOY_3, 0) };
static const unsigned int iso7816_2_clk_pins[] = { PIN(GPIOY_13, 0) };
static const unsigned int iso7816_2_data_pins[] = { PIN(GPIOY_14, 0) };
/* bank DV */
static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
static const unsigned int pwm_c0_pins[] = { PIN(GPIODV_29, 0) };
static const unsigned int pwm_vs_2_pins[] = { PIN(GPIODV_9, 0) };
static const unsigned int pwm_vs_3_pins[] = { PIN(GPIODV_28, 0) };
static const unsigned int pwm_vs_4_pins[] = { PIN(GPIODV_29, 0) };
static const unsigned int xtal24_out_pins[] = { PIN(GPIODV_29, 0) };
static const unsigned int uart_tx_c_pins[] = { PIN(GPIODV_24, 0) };
static const unsigned int uart_rx_c_pins[] = { PIN(GPIODV_25, 0) };
static const unsigned int uart_cts_c_pins[] = { PIN(GPIODV_26, 0) };
static const unsigned int uart_rts_c_pins[] = { PIN(GPIODV_27, 0) };
static const unsigned int pwm_c1_pins[] = { PIN(GPIODV_9, 0) };
static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, 0) };
static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, 0) };
static const unsigned int i2c_sda_b0_pins[] = { PIN(GPIODV_26, 0) };
static const unsigned int i2c_sck_b0_pins[] = { PIN(GPIODV_27, 0) };
static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIODV_28, 0) };
static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIODV_29, 0) };
/* bank H */
static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
static const unsigned int hdmi_cec_0_pins[] = { PIN(GPIOH_3, 0) };
static const unsigned int eth_txd1_0_pins[] = { PIN(GPIOH_5, 0) };
static const unsigned int eth_txd0_0_pins[] = { PIN(GPIOH_6, 0) };
static const unsigned int clk_24m_out_pins[] = { PIN(GPIOH_9, 0) };
static const unsigned int spi_ss1_pins[] = { PIN(GPIOH_0, 0) };
static const unsigned int spi_ss2_pins[] = { PIN(GPIOH_1, 0) };
static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOH_3, 0) };
static const unsigned int spi_miso_1_pins[] = { PIN(GPIOH_4, 0) };
static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOH_5, 0) };
static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOH_6, 0) };
static const unsigned int eth_txd3_pins[] = { PIN(GPIOH_7, 0) };
static const unsigned int eth_txd2_pins[] = { PIN(GPIOH_8, 0) };
static const unsigned int eth_tx_clk_pins[] = { PIN(GPIOH_9, 0) };
static const unsigned int i2c_sda_b1_pins[] = { PIN(GPIOH_3, 0) };
static const unsigned int i2c_sck_b1_pins[] = { PIN(GPIOH_4, 0) };
static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOH_5, 0) };
static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOH_6, 0) };
static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
/* bank BOOT */
static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
PIN(BOOT_2, 0), PIN(BOOT_3, 0),
PIN(BOOT_4, 0), PIN(BOOT_5, 0),
PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
static const unsigned int nand_dqs_0_pins[] = { PIN(BOOT_15, 0) };
static const unsigned int nand_dqs_1_pins[] = { PIN(BOOT_18, 0) };
static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
PIN(BOOT_3, 0) };
static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_8, 0) };
static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_10, 0) };
static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_8, 0) };
static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_10, 0) };
/* bank CARD */
static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
PIN(CARD_5, 0) };
static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
/* bank AO */
static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
static const unsigned int clk_32k_in_out_pins[] = { PIN(GPIOAO_6, AO_OFF) };
static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
static const unsigned int hdmi_cec_1_pins[] = { PIN(GPIOAO_12, AO_OFF) };
static const unsigned int ir_blaster_pins[] = { PIN(GPIOAO_13, AO_OFF) };
static const unsigned int pwm_c2_pins[] = { PIN(GPIOAO_3, AO_OFF) };
static const unsigned int i2c_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
static const unsigned int i2c_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
static const unsigned int ir_remote_out_pins[] = { PIN(GPIOAO_7, AO_OFF) };
static const unsigned int i2s_am_clk_out_pins[] = { PIN(GPIOAO_8, AO_OFF) };
static const unsigned int i2s_ao_clk_out_pins[] = { PIN(GPIOAO_9, AO_OFF) };
static const unsigned int i2s_lr_clk_out_pins[] = { PIN(GPIOAO_10, AO_OFF) };
static const unsigned int i2s_out_01_pins[] = { PIN(GPIOAO_11, AO_OFF) };
static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, AO_OFF) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, AO_OFF) };
static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
static const unsigned int spdif_out_1_pins[] = { PIN(GPIOAO_6, AO_OFF) };
static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOAO_6, AO_OFF) };
static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOAO_9, AO_OFF) };
static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOAO_10, AO_OFF) };
/* bank DIF */
static const unsigned int eth_rxd1_pins[] = { PIN(DIF_0_P, 0) };
static const unsigned int eth_rxd0_pins[] = { PIN(DIF_0_N, 0) };
static const unsigned int eth_rx_dv_pins[] = { PIN(DIF_1_P, 0) };
static const unsigned int eth_rx_clk_pins[] = { PIN(DIF_1_N, 0) };
static const unsigned int eth_txd0_1_pins[] = { PIN(DIF_2_P, 0) };
static const unsigned int eth_txd1_1_pins[] = { PIN(DIF_2_N, 0) };
static const unsigned int eth_tx_en_pins[] = { PIN(DIF_3_P, 0) };
static const unsigned int eth_ref_clk_pins[] = { PIN(DIF_3_N, 0) };
static const unsigned int eth_mdc_pins[] = { PIN(DIF_4_P, 0) };
static const unsigned int eth_mdio_en_pins[] = { PIN(DIF_4_N, 0) };
static struct meson_pmx_group meson8b_cbus_groups[] = {
GPIO_GROUP(GPIOX_0, 0),
GPIO_GROUP(GPIOX_1, 0),
GPIO_GROUP(GPIOX_2, 0),
GPIO_GROUP(GPIOX_3, 0),
GPIO_GROUP(GPIOX_4, 0),
GPIO_GROUP(GPIOX_5, 0),
GPIO_GROUP(GPIOX_6, 0),
GPIO_GROUP(GPIOX_7, 0),
GPIO_GROUP(GPIOX_8, 0),
GPIO_GROUP(GPIOX_9, 0),
GPIO_GROUP(GPIOX_10, 0),
GPIO_GROUP(GPIOX_11, 0),
GPIO_GROUP(GPIOX_16, 0),
GPIO_GROUP(GPIOX_17, 0),
GPIO_GROUP(GPIOX_18, 0),
GPIO_GROUP(GPIOX_19, 0),
GPIO_GROUP(GPIOX_20, 0),
GPIO_GROUP(GPIOX_21, 0),
GPIO_GROUP(GPIOY_0, 0),
GPIO_GROUP(GPIOY_1, 0),
GPIO_GROUP(GPIOY_3, 0),
GPIO_GROUP(GPIOY_6, 0),
GPIO_GROUP(GPIOY_7, 0),
GPIO_GROUP(GPIOY_8, 0),
GPIO_GROUP(GPIOY_9, 0),
GPIO_GROUP(GPIOY_10, 0),
GPIO_GROUP(GPIOY_11, 0),
GPIO_GROUP(GPIOY_12, 0),
GPIO_GROUP(GPIOY_13, 0),
GPIO_GROUP(GPIOY_14, 0),
GPIO_GROUP(GPIODV_9, 0),
GPIO_GROUP(GPIODV_24, 0),
GPIO_GROUP(GPIODV_25, 0),
GPIO_GROUP(GPIODV_26, 0),
GPIO_GROUP(GPIODV_27, 0),
GPIO_GROUP(GPIODV_28, 0),
GPIO_GROUP(GPIODV_29, 0),
GPIO_GROUP(GPIOH_0, 0),
GPIO_GROUP(GPIOH_1, 0),
GPIO_GROUP(GPIOH_2, 0),
GPIO_GROUP(GPIOH_3, 0),
GPIO_GROUP(GPIOH_4, 0),
GPIO_GROUP(GPIOH_5, 0),
GPIO_GROUP(GPIOH_6, 0),
GPIO_GROUP(GPIOH_7, 0),
GPIO_GROUP(GPIOH_8, 0),
GPIO_GROUP(GPIOH_9, 0),
GPIO_GROUP(DIF_0_P, 0),
GPIO_GROUP(DIF_0_N, 0),
GPIO_GROUP(DIF_1_P, 0),
GPIO_GROUP(DIF_1_N, 0),
GPIO_GROUP(DIF_2_P, 0),
GPIO_GROUP(DIF_2_N, 0),
GPIO_GROUP(DIF_3_P, 0),
GPIO_GROUP(DIF_3_N, 0),
GPIO_GROUP(DIF_4_P, 0),
GPIO_GROUP(DIF_4_N, 0),
/* bank X */
GROUP(sd_d0_a, 8, 5),
GROUP(sd_d1_a, 8, 4),
GROUP(sd_d2_a, 8, 3),
GROUP(sd_d3_a, 8, 2),
GROUP(sdxc_d0_0_a, 5, 29),
GROUP(sdxc_d47_a, 5, 12),
GROUP(sdxc_d13_0_a, 5, 28),
GROUP(sd_clk_a, 8, 1),
GROUP(sd_cmd_a, 8, 0),
GROUP(xtal_32k_out, 3, 22),
GROUP(xtal_24m_out, 3, 20),
GROUP(uart_tx_b0, 4, 9),
GROUP(uart_rx_b0, 4, 8),
GROUP(uart_cts_b0, 4, 7),
GROUP(uart_rts_b0, 4, 6),
GROUP(sdxc_d0_1_a, 5, 14),
GROUP(sdxc_d13_1_a, 5, 13),
GROUP(pcm_out_a, 3, 30),
GROUP(pcm_in_a, 3, 29),
GROUP(pcm_fs_a, 3, 28),
GROUP(pcm_clk_a, 3, 27),
GROUP(sdxc_clk_a, 5, 11),
GROUP(sdxc_cmd_a, 5, 10),
GROUP(pwm_vs_0, 7, 31),
GROUP(pwm_e, 9, 19),
GROUP(pwm_vs_1, 7, 30),
GROUP(uart_tx_a, 4, 17),
GROUP(uart_rx_a, 4, 16),
GROUP(uart_cts_a, 4, 15),
GROUP(uart_rts_a, 4, 14),
GROUP(uart_tx_b1, 6, 19),
GROUP(uart_rx_b1, 6, 18),
GROUP(uart_cts_b1, 6, 17),
GROUP(uart_rts_b1, 6, 16),
GROUP(iso7816_0_clk, 5, 9),
GROUP(iso7816_0_data, 5, 8),
GROUP(spi_sclk_0, 4, 22),
GROUP(spi_miso_0, 4, 24),
GROUP(spi_mosi_0, 4, 23),
GROUP(iso7816_det, 4, 21),
GROUP(iso7816_reset, 4, 20),
GROUP(iso7816_1_clk, 4, 19),
GROUP(iso7816_1_data, 4, 18),
GROUP(spi_ss0_0, 4, 25),
GROUP(tsin_clk_b, 3, 6),
GROUP(tsin_sop_b, 3, 7),
GROUP(tsin_d0_b, 3, 8),
GROUP(pwm_b, 2, 3),
GROUP(i2c_sda_d0, 4, 5),
GROUP(i2c_sck_d0, 4, 4),
GROUP(tsin_d_valid_b, 3, 9),
/* bank Y */
GROUP(tsin_d_valid_a, 3, 2),
GROUP(tsin_sop_a, 3, 1),
GROUP(tsin_d17_a, 3, 5),
GROUP(tsin_clk_a, 3, 0),
GROUP(tsin_d0_a, 3, 4),
GROUP(spdif_out_0, 1, 7),
GROUP(xtal_24m, 3, 18),
GROUP(iso7816_2_clk, 5, 7),
GROUP(iso7816_2_data, 5, 6),
/* bank DV */
GROUP(pwm_d, 3, 26),
GROUP(pwm_c0, 3, 25),
GROUP(pwm_vs_2, 7, 28),
GROUP(pwm_vs_3, 7, 27),
GROUP(pwm_vs_4, 7, 26),
GROUP(xtal24_out, 7, 25),
GROUP(uart_tx_c, 6, 23),
GROUP(uart_rx_c, 6, 22),
GROUP(uart_cts_c, 6, 21),
GROUP(uart_rts_c, 6, 20),
GROUP(pwm_c1, 3, 24),
GROUP(i2c_sda_a, 9, 31),
GROUP(i2c_sck_a, 9, 30),
GROUP(i2c_sda_b0, 9, 29),
GROUP(i2c_sck_b0, 9, 28),
GROUP(i2c_sda_c0, 9, 27),
GROUP(i2c_sck_c0, 9, 26),
/* bank H */
GROUP(hdmi_hpd, 1, 26),
GROUP(hdmi_sda, 1, 25),
GROUP(hdmi_scl, 1, 24),
GROUP(hdmi_cec_0, 1, 23),
GROUP(eth_txd1_0, 7, 21),
GROUP(eth_txd0_0, 7, 20),
GROUP(clk_24m_out, 4, 1),
GROUP(spi_ss1, 8, 11),
GROUP(spi_ss2, 8, 12),
GROUP(spi_ss0_1, 9, 13),
GROUP(spi_miso_1, 9, 12),
GROUP(spi_mosi_1, 9, 11),
GROUP(spi_sclk_1, 9, 10),
GROUP(eth_txd3, 6, 13),
GROUP(eth_txd2, 6, 12),
GROUP(eth_tx_clk, 6, 11),
GROUP(i2c_sda_b1, 5, 27),
GROUP(i2c_sck_b1, 5, 26),
GROUP(i2c_sda_c1, 5, 25),
GROUP(i2c_sck_c1, 5, 24),
GROUP(i2c_sda_d1, 4, 3),
GROUP(i2c_sck_d1, 4, 2),
/* bank BOOT */
GROUP(nand_io, 2, 26),
GROUP(nand_io_ce0, 2, 25),
GROUP(nand_io_ce1, 2, 24),
GROUP(nand_io_rb0, 2, 17),
GROUP(nand_ale, 2, 21),
GROUP(nand_cle, 2, 20),
GROUP(nand_wen_clk, 2, 19),
GROUP(nand_ren_clk, 2, 18),
GROUP(nand_dqs_0, 2, 27),
GROUP(nand_dqs_1, 2, 28),
GROUP(sdxc_d0_c, 4, 30),
GROUP(sdxc_d13_c, 4, 29),
GROUP(sdxc_d47_c, 4, 28),
GROUP(sdxc_clk_c, 7, 19),
GROUP(sdxc_cmd_c, 7, 18),
GROUP(nor_d, 5, 1),
GROUP(nor_q, 5, 3),
GROUP(nor_c, 5, 2),
GROUP(nor_cs, 5, 0),
GROUP(sd_d0_c, 6, 29),
GROUP(sd_d1_c, 6, 28),
GROUP(sd_d2_c, 6, 27),
GROUP(sd_d3_c, 6, 26),
GROUP(sd_cmd_c, 6, 30),
GROUP(sd_clk_c, 6, 31),
/* bank CARD */
GROUP(sd_d1_b, 2, 14),
GROUP(sd_d0_b, 2, 15),
GROUP(sd_clk_b, 2, 11),
GROUP(sd_cmd_b, 2, 10),
GROUP(sd_d3_b, 2, 12),
GROUP(sd_d2_b, 2, 13),
GROUP(sdxc_d13_b, 2, 6),
GROUP(sdxc_d0_b, 2, 7),
GROUP(sdxc_clk_b, 2, 5),
GROUP(sdxc_cmd_b, 2, 4),
/* bank DIF */
GROUP(eth_rxd1, 6, 0),
GROUP(eth_rxd0, 6, 1),
GROUP(eth_rx_dv, 6, 2),
GROUP(eth_rx_clk, 6, 3),
GROUP(eth_txd0_1, 6, 4),
GROUP(eth_txd1_1, 6, 5),
GROUP(eth_tx_en, 6, 6),
GROUP(eth_ref_clk, 6, 8),
GROUP(eth_mdc, 6, 9),
GROUP(eth_mdio_en, 6, 10),
};
static struct meson_pmx_group meson8b_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0, AO_OFF),
GPIO_GROUP(GPIOAO_1, AO_OFF),
GPIO_GROUP(GPIOAO_2, AO_OFF),
GPIO_GROUP(GPIOAO_3, AO_OFF),
GPIO_GROUP(GPIOAO_4, AO_OFF),
GPIO_GROUP(GPIOAO_5, AO_OFF),
GPIO_GROUP(GPIOAO_6, AO_OFF),
GPIO_GROUP(GPIOAO_7, AO_OFF),
GPIO_GROUP(GPIOAO_8, AO_OFF),
GPIO_GROUP(GPIOAO_9, AO_OFF),
GPIO_GROUP(GPIOAO_10, AO_OFF),
GPIO_GROUP(GPIOAO_11, AO_OFF),
GPIO_GROUP(GPIOAO_12, AO_OFF),
GPIO_GROUP(GPIOAO_13, AO_OFF),
GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
GPIO_GROUP(GPIO_TEST_N, AO_OFF),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
GROUP(uart_cts_ao_a, 0, 10),
GROUP(uart_rts_ao_a, 0, 9),
GROUP(i2c_mst_sck_ao, 0, 6),
GROUP(i2c_mst_sda_ao, 0, 5),
GROUP(clk_32k_in_out, 0, 18),
GROUP(remote_input, 0, 0),
GROUP(hdmi_cec_1, 0, 17),
GROUP(ir_blaster, 0, 31),
GROUP(pwm_c2, 0, 22),
GROUP(i2c_sck_ao, 0, 2),
GROUP(i2c_sda_ao, 0, 1),
GROUP(ir_remote_out, 0, 21),
GROUP(i2s_am_clk_out, 0, 30),
GROUP(i2s_ao_clk_out, 0, 29),
GROUP(i2s_lr_clk_out, 0, 28),
GROUP(i2s_out_01, 0, 27),
GROUP(uart_tx_ao_b0, 0, 26),
GROUP(uart_rx_ao_b0, 0, 25),
GROUP(uart_cts_ao_b, 0, 8),
GROUP(uart_rts_ao_b, 0, 7),
GROUP(uart_tx_ao_b1, 0, 24),
GROUP(uart_rx_ao_b1, 0, 23),
GROUP(spdif_out_1, 0, 16),
GROUP(i2s_in_ch01, 0, 13),
GROUP(i2s_ao_clk_in, 0, 15),
GROUP(i2s_lr_clk_in, 0, 14),
};
static const char * const gpio_groups[] = {
"GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
"GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
"GPIOX_10", "GPIOX_11", "GPIOX_16", "GPIOX_17", "GPIOX_18",
"GPIOX_19", "GPIOX_20", "GPIOX_21",
"GPIOY_0", "GPIOY_1", "GPIOY_3", "GPIOY_6", "GPIOY_7",
"GPIOY_8", "GPIOY_9", "GPIOY_10", "GPIOY_11", "GPIOY_12",
"GPIOY_13", "GPIOY_14",
"GPIODV_9", "GPIODV_24", "GPIODV_25", "GPIODV_26",
"GPIODV_27", "GPIODV_28", "GPIODV_29",
"GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4",
"GPIOH_5", "GPIOH_6", "GPIOH_7", "GPIOH_8", "GPIOH_9",
"CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4",
"CARD_5", "CARD_6",
"BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
"BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
"GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
"GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
"GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N",
"DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N",
"DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N",
"DIF_4_P", "DIF_4_N"
};
static const char * const sd_a_groups[] = {
"sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a",
"sd_cmd_a"
};
static const char * const sdxc_a_groups[] = {
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
"sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
};
static const char * const pcm_a_groups[] = {
"pcm_out_a", "pcm_in_a", "pcm_fs_a", "pcm_clk_a"
};
static const char * const uart_a_groups[] = {
"uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a"
};
static const char * const uart_b_groups[] = {
"uart_tx_b0", "uart_rx_b0", "uart_cts_b0", "uart_rts_b0",
"uart_tx_b1", "uart_rx_b1", "uart_cts_b1", "uart_rts_b1"
};
static const char * const iso7816_groups[] = {
"iso7816_det", "iso7816_reset", "iso7816_0_clk", "iso7816_0_data",
"iso7816_1_clk", "iso7816_1_data", "iso7816_2_clk", "iso7816_2_data"
};
static const char * const i2c_d_groups[] = {
"i2c_sda_d0", "i2c_sck_d0", "i2c_sda_d1", "i2c_sck_d1"
};
static const char * const xtal_groups[] = {
"xtal_32k_out", "xtal_24m_out", "xtal_24m", "xtal24_out"
};
static const char * const uart_c_groups[] = {
"uart_tx_c", "uart_rx_c", "uart_cts_c", "uart_rts_c"
};
static const char * const i2c_c_groups[] = {
"i2c_sda_c0", "i2c_sck_c0", "i2c_sda_c1", "i2c_sck_c1"
};
static const char * const hdmi_groups[] = {
"hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec_0"
};
static const char * const hdmi_cec_groups[] = {
"hdmi_cec_1"
};
static const char * const spi_groups[] = {
"spi_ss0_0", "spi_miso_0", "spi_mosi_0", "spi_sclk_0",
"spi_ss0_1", "spi_ss1", "spi_sclk_1", "spi_mosi_1",
"spi_miso_1", "spi_ss2"
};
static const char * const ethernet_groups[] = {
"eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
"eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
"eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
"eth_txd2", "eth_txd3"
};
static const char * const i2c_a_groups[] = {
"i2c_sda_a", "i2c_sck_a",
};
static const char * const i2c_b_groups[] = {
"i2c_sda_b0", "i2c_sck_b0", "i2c_sda_b1", "i2c_sck_b1"
};
static const char * const sd_c_groups[] = {
"sd_d0_c", "sd_d1_c", "sd_d2_c", "sd_d3_c",
"sd_cmd_c", "sd_clk_c"
};
static const char * const sdxc_c_groups[] = {
"sdxc_d0_c", "sdxc_d13_c", "sdxc_d47_c", "sdxc_cmd_c",
"sdxc_clk_c"
};
static const char * const nand_groups[] = {
"nand_io", "nand_io_ce0", "nand_io_ce1",
"nand_io_rb0", "nand_ale", "nand_cle",
"nand_wen_clk", "nand_ren_clk", "nand_dqs0",
"nand_dqs1"
};
static const char * const nor_groups[] = {
"nor_d", "nor_q", "nor_c", "nor_cs"
};
static const char * const sd_b_groups[] = {
"sd_d1_b", "sd_d0_b", "sd_clk_b", "sd_cmd_b",
"sd_d3_b", "sd_d2_b"
};
static const char * const sdxc_b_groups[] = {
"sdxc_d13_b", "sdxc_d0_b", "sdxc_clk_b", "sdxc_cmd_b"
};
static const char * const uart_ao_groups[] = {
"uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
};
static const char * const remote_groups[] = {
"remote_input", "ir_blaster", "ir_remote_out"
};
static const char * const i2c_slave_ao_groups[] = {
"i2c_sck_ao", "i2c_sda_ao"
};
static const char * const uart_ao_b_groups[] = {
"uart_tx_ao_b0", "uart_rx_ao_b0", "uart_tx_ao_b1", "uart_rx_ao_b1",
"uart_cts_ao_b", "uart_rts_ao_b"
};
static const char * const i2c_mst_ao_groups[] = {
"i2c_mst_sck_ao", "i2c_mst_sda_ao"
};
static const char * const clk_24m_groups[] = {
"clk_24m_out"
};
static const char * const clk_32k_groups[] = {
"clk_32k_in_out"
};
static const char * const spdif_0_groups[] = {
"spdif_out_0"
};
static const char * const spdif_1_groups[] = {
"spdif_out_1"
};
static const char * const i2s_groups[] = {
"i2s_am_clk_out", "i2s_ao_clk_out", "i2s_lr_clk_out",
"i2s_out_01", "i2s_in_ch01", "i2s_ao_clk_in",
"i2s_lr_clk_in"
};
static const char * const pwm_b_groups[] = {
"pwm_b"
};
static const char * const pwm_c_groups[] = {
"pwm_c0", "pwm_c1"
};
static const char * const pwm_c_ao_groups[] = {
"pwm_c2"
};
static const char * const pwm_d_groups[] = {
"pwm_d"
};
static const char * const pwm_e_groups[] = {
"pwm_e"
};
static const char * const pwm_vs_groups[] = {
"pwm_vs_0", "pwm_vs_1", "pwm_vs_2",
"pwm_vs_3", "pwm_vs_4"
};
static const char * const tsin_a_groups[] = {
"tsin_d0_a", "tsin_d17_a", "tsin_clk_a", "tsin_sop_a",
"tsin_d_valid_a"
};
static const char * const tsin_b_groups[] = {
"tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b"
};
static struct meson_pmx_func meson8b_cbus_functions[] = {
FUNCTION(gpio),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
FUNCTION(pcm_a),
FUNCTION(uart_a),
FUNCTION(uart_b),
FUNCTION(iso7816),
FUNCTION(i2c_d),
FUNCTION(xtal),
FUNCTION(uart_c),
FUNCTION(i2c_c),
FUNCTION(hdmi),
FUNCTION(spi),
FUNCTION(ethernet),
FUNCTION(i2c_a),
FUNCTION(i2c_b),
FUNCTION(sd_c),
FUNCTION(sdxc_c),
FUNCTION(nand),
FUNCTION(nor),
FUNCTION(sd_b),
FUNCTION(sdxc_b),
FUNCTION(spdif_0),
FUNCTION(pwm_b),
FUNCTION(pwm_c),
FUNCTION(pwm_d),
FUNCTION(pwm_e),
FUNCTION(pwm_vs),
FUNCTION(tsin_a),
FUNCTION(tsin_b),
FUNCTION(clk_24m),
};
static struct meson_pmx_func meson8b_aobus_functions[] = {
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
FUNCTION(i2c_slave_ao),
FUNCTION(i2c_mst_ao),
FUNCTION(i2s),
FUNCTION(remote),
FUNCTION(clk_32k),
FUNCTION(pwm_c_ao),
FUNCTION(spdif_1),
FUNCTION(hdmi_cec),
};
static struct meson_bank meson8b_cbus_banks[] = {
/* name first last pullen pull dir out in */
BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
static struct meson_bank meson8b_aobus_banks[] = {
/* name first last pullen pull dir out in */
BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
.pin_base = 0,
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
.funcs = meson8b_cbus_functions,
.banks = meson8b_cbus_banks,
.num_pins = ARRAY_SIZE(meson8b_cbus_pins),
.num_groups = ARRAY_SIZE(meson8b_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8b_cbus_functions),
.num_banks = ARRAY_SIZE(meson8b_cbus_banks),
};
struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.name = "aobus-banks",
.pin_base = 130,
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
.funcs = meson8b_aobus_functions,
.banks = meson8b_aobus_banks,
.num_pins = ARRAY_SIZE(meson8b_aobus_pins),
.num_groups = ARRAY_SIZE(meson8b_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
};
|
0 | /*
* ISP116x register declarations and HCD data structures
*
* Copyright (C) 2005 Olav Kongas <[email protected]>
* Portions:
* Copyright (C) 2004 Lothar Wassmann
* Copyright (C) 2004 Psion Teklogix
* Copyright (C) 2004 David Brownell
*/
/* us of 1ms frame */
#define MAX_LOAD_LIMIT 850
/* Full speed: max # of bytes to transfer for a single urb
at a time must be < 1024 && must be multiple of 64.
832 allows transferring 4kiB within 5 frames. */
#define MAX_TRANSFER_SIZE_FULLSPEED 832
/* Low speed: there is no reason to schedule in very big
chunks; often the requested long transfers are for
string descriptors containing short strings. */
#define MAX_TRANSFER_SIZE_LOWSPEED 64
/* Bytetime (us), a rough indication of how much time it
would take to transfer a byte of useful data over USB */
#define BYTE_TIME_FULLSPEED 1
#define BYTE_TIME_LOWSPEED 20
/* Buffer sizes */
#define ISP116x_BUF_SIZE 4096
#define ISP116x_ITL_BUFSIZE 0
#define ISP116x_ATL_BUFSIZE ((ISP116x_BUF_SIZE) - 2*(ISP116x_ITL_BUFSIZE))
#define ISP116x_WRITE_OFFSET 0x80
/*------------ ISP116x registers/bits ------------*/
#define HCREVISION 0x00
#define HCCONTROL 0x01
#define HCCONTROL_HCFS (3 << 6) /* host controller
functional state */
#define HCCONTROL_USB_RESET (0 << 6)
#define HCCONTROL_USB_RESUME (1 << 6)
#define HCCONTROL_USB_OPER (2 << 6)
#define HCCONTROL_USB_SUSPEND (3 << 6)
#define HCCONTROL_RWC (1 << 9) /* remote wakeup connected */
#define HCCONTROL_RWE (1 << 10) /* remote wakeup enable */
#define HCCMDSTAT 0x02
#define HCCMDSTAT_HCR (1 << 0) /* host controller reset */
#define HCCMDSTAT_SOC (3 << 16) /* scheduling overrun count */
#define HCINTSTAT 0x03
#define HCINT_SO (1 << 0) /* scheduling overrun */
#define HCINT_WDH (1 << 1) /* writeback of done_head */
#define HCINT_SF (1 << 2) /* start frame */
#define HCINT_RD (1 << 3) /* resume detect */
#define HCINT_UE (1 << 4) /* unrecoverable error */
#define HCINT_FNO (1 << 5) /* frame number overflow */
#define HCINT_RHSC (1 << 6) /* root hub status change */
#define HCINT_OC (1 << 30) /* ownership change */
#define HCINT_MIE (1 << 31) /* master interrupt enable */
#define HCINTENB 0x04
#define HCINTDIS 0x05
#define HCFMINTVL 0x0d
#define HCFMREM 0x0e
#define HCFMNUM 0x0f
#define HCLSTHRESH 0x11
#define HCRHDESCA 0x12
#define RH_A_NDP (0x3 << 0) /* # downstream ports */
#define RH_A_PSM (1 << 8) /* power switching mode */
#define RH_A_NPS (1 << 9) /* no power switching */
#define RH_A_DT (1 << 10) /* device type (mbz) */
#define RH_A_OCPM (1 << 11) /* overcurrent protection
mode */
#define RH_A_NOCP (1 << 12) /* no overcurrent protection */
#define RH_A_POTPGT (0xff << 24) /* power on -> power good
time */
#define HCRHDESCB 0x13
#define RH_B_DR (0xffff << 0) /* device removable flags */
#define RH_B_PPCM (0xffff << 16) /* port power control mask */
#define HCRHSTATUS 0x14
#define RH_HS_LPS (1 << 0) /* local power status */
#define RH_HS_OCI (1 << 1) /* over current indicator */
#define RH_HS_DRWE (1 << 15) /* device remote wakeup
enable */
#define RH_HS_LPSC (1 << 16) /* local power status change */
#define RH_HS_OCIC (1 << 17) /* over current indicator
change */
#define RH_HS_CRWE (1 << 31) /* clear remote wakeup
enable */
#define HCRHPORT1 0x15
#define RH_PS_CCS (1 << 0) /* current connect status */
#define RH_PS_PES (1 << 1) /* port enable status */
#define RH_PS_PSS (1 << 2) /* port suspend status */
#define RH_PS_POCI (1 << 3) /* port over current
indicator */
#define RH_PS_PRS (1 << 4) /* port reset status */
#define RH_PS_PPS (1 << 8) /* port power status */
#define RH_PS_LSDA (1 << 9) /* low speed device attached */
#define RH_PS_CSC (1 << 16) /* connect status change */
#define RH_PS_PESC (1 << 17) /* port enable status change */
#define RH_PS_PSSC (1 << 18) /* port suspend status
change */
#define RH_PS_OCIC (1 << 19) /* over current indicator
change */
#define RH_PS_PRSC (1 << 20) /* port reset status change */
#define HCRHPORT_CLRMASK (0x1f << 16)
#define HCRHPORT2 0x16
#define HCHWCFG 0x20
#define HCHWCFG_15KRSEL (1 << 12)
#define HCHWCFG_CLKNOTSTOP (1 << 11)
#define HCHWCFG_ANALOG_OC (1 << 10)
#define HCHWCFG_DACK_MODE (1 << 8)
#define HCHWCFG_EOT_POL (1 << 7)
#define HCHWCFG_DACK_POL (1 << 6)
#define HCHWCFG_DREQ_POL (1 << 5)
#define HCHWCFG_DBWIDTH_MASK (0x03 << 3)
#define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK)
#define HCHWCFG_INT_POL (1 << 2)
#define HCHWCFG_INT_TRIGGER (1 << 1)
#define HCHWCFG_INT_ENABLE (1 << 0)
#define HCDMACFG 0x21
#define HCDMACFG_BURST_LEN_MASK (0x03 << 5)
#define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK)
#define HCDMACFG_BURST_LEN_1 HCDMACFG_BURST_LEN(0)
#define HCDMACFG_BURST_LEN_4 HCDMACFG_BURST_LEN(1)
#define HCDMACFG_BURST_LEN_8 HCDMACFG_BURST_LEN(2)
#define HCDMACFG_DMA_ENABLE (1 << 4)
#define HCDMACFG_BUF_TYPE_MASK (0x07 << 1)
#define HCDMACFG_CTR_SEL (1 << 2)
#define HCDMACFG_ITLATL_SEL (1 << 1)
#define HCDMACFG_DMA_RW_SELECT (1 << 0)
#define HCXFERCTR 0x22
#define HCuPINT 0x24
#define HCuPINT_SOF (1 << 0)
#define HCuPINT_ATL (1 << 1)
#define HCuPINT_AIIEOT (1 << 2)
#define HCuPINT_OPR (1 << 4)
#define HCuPINT_SUSP (1 << 5)
#define HCuPINT_CLKRDY (1 << 6)
#define HCuPINTENB 0x25
#define HCCHIPID 0x27
#define HCCHIPID_MASK 0xff00
#define HCCHIPID_MAGIC 0x6100
#define HCSCRATCH 0x28
#define HCSWRES 0x29
#define HCSWRES_MAGIC 0x00f6
#define HCITLBUFLEN 0x2a
#define HCATLBUFLEN 0x2b
#define HCBUFSTAT 0x2c
#define HCBUFSTAT_ITL0_FULL (1 << 0)
#define HCBUFSTAT_ITL1_FULL (1 << 1)
#define HCBUFSTAT_ATL_FULL (1 << 2)
#define HCBUFSTAT_ITL0_DONE (1 << 3)
#define HCBUFSTAT_ITL1_DONE (1 << 4)
#define HCBUFSTAT_ATL_DONE (1 << 5)
#define HCRDITL0LEN 0x2d
#define HCRDITL1LEN 0x2e
#define HCITLPORT 0x40
#define HCATLPORT 0x41
/* Philips transfer descriptor */
struct ptd {
u16 count;
#define PTD_COUNT_MSK (0x3ff << 0)
#define PTD_TOGGLE_MSK (1 << 10)
#define PTD_ACTIVE_MSK (1 << 11)
#define PTD_CC_MSK (0xf << 12)
u16 mps;
#define PTD_MPS_MSK (0x3ff << 0)
#define PTD_SPD_MSK (1 << 10)
#define PTD_LAST_MSK (1 << 11)
#define PTD_EP_MSK (0xf << 12)
u16 len;
#define PTD_LEN_MSK (0x3ff << 0)
#define PTD_DIR_MSK (3 << 10)
#define PTD_DIR_SETUP (0)
#define PTD_DIR_OUT (1)
#define PTD_DIR_IN (2)
#define PTD_B5_5_MSK (1 << 13)
u16 faddr;
#define PTD_FA_MSK (0x7f << 0)
#define PTD_FMT_MSK (1 << 7)
} __attribute__ ((packed, aligned(2)));
/* PTD accessor macros. */
#define PTD_GET_COUNT(p) (((p)->count & PTD_COUNT_MSK) >> 0)
#define PTD_COUNT(v) (((v) << 0) & PTD_COUNT_MSK)
#define PTD_GET_TOGGLE(p) (((p)->count & PTD_TOGGLE_MSK) >> 10)
#define PTD_TOGGLE(v) (((v) << 10) & PTD_TOGGLE_MSK)
#define PTD_GET_ACTIVE(p) (((p)->count & PTD_ACTIVE_MSK) >> 11)
#define PTD_ACTIVE(v) (((v) << 11) & PTD_ACTIVE_MSK)
#define PTD_GET_CC(p) (((p)->count & PTD_CC_MSK) >> 12)
#define PTD_CC(v) (((v) << 12) & PTD_CC_MSK)
#define PTD_GET_MPS(p) (((p)->mps & PTD_MPS_MSK) >> 0)
#define PTD_MPS(v) (((v) << 0) & PTD_MPS_MSK)
#define PTD_GET_SPD(p) (((p)->mps & PTD_SPD_MSK) >> 10)
#define PTD_SPD(v) (((v) << 10) & PTD_SPD_MSK)
#define PTD_GET_LAST(p) (((p)->mps & PTD_LAST_MSK) >> 11)
#define PTD_LAST(v) (((v) << 11) & PTD_LAST_MSK)
#define PTD_GET_EP(p) (((p)->mps & PTD_EP_MSK) >> 12)
#define PTD_EP(v) (((v) << 12) & PTD_EP_MSK)
#define PTD_GET_LEN(p) (((p)->len & PTD_LEN_MSK) >> 0)
#define PTD_LEN(v) (((v) << 0) & PTD_LEN_MSK)
#define PTD_GET_DIR(p) (((p)->len & PTD_DIR_MSK) >> 10)
#define PTD_DIR(v) (((v) << 10) & PTD_DIR_MSK)
#define PTD_GET_B5_5(p) (((p)->len & PTD_B5_5_MSK) >> 13)
#define PTD_B5_5(v) (((v) << 13) & PTD_B5_5_MSK)
#define PTD_GET_FA(p) (((p)->faddr & PTD_FA_MSK) >> 0)
#define PTD_FA(v) (((v) << 0) & PTD_FA_MSK)
#define PTD_GET_FMT(p) (((p)->faddr & PTD_FMT_MSK) >> 7)
#define PTD_FMT(v) (((v) << 7) & PTD_FMT_MSK)
/* Hardware transfer status codes -- CC from ptd->count */
#define TD_CC_NOERROR 0x00
#define TD_CC_CRC 0x01
#define TD_CC_BITSTUFFING 0x02
#define TD_CC_DATATOGGLEM 0x03
#define TD_CC_STALL 0x04
#define TD_DEVNOTRESP 0x05
#define TD_PIDCHECKFAIL 0x06
#define TD_UNEXPECTEDPID 0x07
#define TD_DATAOVERRUN 0x08
#define TD_DATAUNDERRUN 0x09
/* 0x0A, 0x0B reserved for hardware */
#define TD_BUFFEROVERRUN 0x0C
#define TD_BUFFERUNDERRUN 0x0D
/* 0x0E, 0x0F reserved for HCD */
#define TD_NOTACCESSED 0x0F
/* map PTD status codes (CC) to errno values */
static const int cc_to_error[16] = {
/* No Error */ 0,
/* CRC Error */ -EILSEQ,
/* Bit Stuff */ -EPROTO,
/* Data Togg */ -EILSEQ,
/* Stall */ -EPIPE,
/* DevNotResp */ -ETIME,
/* PIDCheck */ -EPROTO,
/* UnExpPID */ -EPROTO,
/* DataOver */ -EOVERFLOW,
/* DataUnder */ -EREMOTEIO,
/* (for hw) */ -EIO,
/* (for hw) */ -EIO,
/* BufferOver */ -ECOMM,
/* BuffUnder */ -ENOSR,
/* (for HCD) */ -EALREADY,
/* (for HCD) */ -EALREADY
};
/*--------------------------------------------------------------*/
#define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */
#define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE)
struct isp116x {
spinlock_t lock;
void __iomem *addr_reg;
void __iomem *data_reg;
struct isp116x_platform_data *board;
struct dentry *dentry;
unsigned long stat1, stat2, stat4, stat8, stat16;
/* HC registers */
u32 intenb; /* "OHCI" interrupts */
u16 irqenb; /* uP interrupts */
/* Root hub registers */
u32 rhdesca;
u32 rhdescb;
u32 rhstatus;
/* async schedule: control, bulk */
struct list_head async;
/* periodic schedule: int */
u16 load[PERIODIC_SIZE];
struct isp116x_ep *periodic[PERIODIC_SIZE];
unsigned periodic_count;
u16 fmindex;
/* Schedule for the current frame */
struct isp116x_ep *atl_active;
int atl_buflen;
int atl_bufshrt;
int atl_last_dir;
atomic_t atl_finishing;
};
static inline struct isp116x *hcd_to_isp116x(struct usb_hcd *hcd)
{
return (struct isp116x *)(hcd->hcd_priv);
}
static inline struct usb_hcd *isp116x_to_hcd(struct isp116x *isp116x)
{
return container_of((void *)isp116x, struct usb_hcd, hcd_priv);
}
struct isp116x_ep {
struct usb_host_endpoint *hep;
struct usb_device *udev;
struct ptd ptd;
u8 maxpacket;
u8 epnum;
u8 nextpid;
u16 error_count;
u16 length; /* of current packet */
unsigned char *data; /* to databuf */
/* queue of active EP's (the ones scheduled for the
current frame) */
struct isp116x_ep *active;
/* periodic schedule */
u16 period;
u16 branch;
u16 load;
struct isp116x_ep *next;
/* async schedule */
struct list_head schedule;
};
/*-------------------------------------------------------------------------*/
#define DBG(stuff...) pr_debug("116x: " stuff)
#ifdef VERBOSE
# define VDBG DBG
#else
# define VDBG(stuff...) do{}while(0)
#endif
#define ERR(stuff...) printk(KERN_ERR "116x: " stuff)
#define WARNING(stuff...) printk(KERN_WARNING "116x: " stuff)
#define INFO(stuff...) printk(KERN_INFO "116x: " stuff)
/* ------------------------------------------------- */
#if defined(USE_PLATFORM_DELAY)
#if defined(USE_NDELAY)
#error USE_PLATFORM_DELAY and USE_NDELAY simultaneously defined.
#endif
#define isp116x_delay(h,d) (h)->board->delay( \
isp116x_to_hcd(h)->self.controller,d)
#define isp116x_check_platform_delay(h) ((h)->board->delay == NULL)
#elif defined(USE_NDELAY)
#define isp116x_delay(h,d) ndelay(d)
#define isp116x_check_platform_delay(h) 0
#else
#define isp116x_delay(h,d) do{}while(0)
#define isp116x_check_platform_delay(h) 0
#endif
static inline void isp116x_write_addr(struct isp116x *isp116x, unsigned reg)
{
writew(reg & 0xff, isp116x->addr_reg);
isp116x_delay(isp116x, 300);
}
static inline void isp116x_write_data16(struct isp116x *isp116x, u16 val)
{
writew(val, isp116x->data_reg);
isp116x_delay(isp116x, 150);
}
static inline void isp116x_raw_write_data16(struct isp116x *isp116x, u16 val)
{
__raw_writew(val, isp116x->data_reg);
isp116x_delay(isp116x, 150);
}
static inline u16 isp116x_read_data16(struct isp116x *isp116x)
{
u16 val;
val = readw(isp116x->data_reg);
isp116x_delay(isp116x, 150);
return val;
}
static inline u16 isp116x_raw_read_data16(struct isp116x *isp116x)
{
u16 val;
val = __raw_readw(isp116x->data_reg);
isp116x_delay(isp116x, 150);
return val;
}
static inline void isp116x_write_data32(struct isp116x *isp116x, u32 val)
{
writew(val & 0xffff, isp116x->data_reg);
isp116x_delay(isp116x, 150);
writew(val >> 16, isp116x->data_reg);
isp116x_delay(isp116x, 150);
}
static inline u32 isp116x_read_data32(struct isp116x *isp116x)
{
u32 val;
val = (u32) readw(isp116x->data_reg);
isp116x_delay(isp116x, 150);
val |= ((u32) readw(isp116x->data_reg)) << 16;
isp116x_delay(isp116x, 150);
return val;
}
/* Let's keep register access functions out of line. Hint:
we wait at least 150 ns at every access.
*/
static u16 isp116x_read_reg16(struct isp116x *isp116x, unsigned reg)
{
isp116x_write_addr(isp116x, reg);
return isp116x_read_data16(isp116x);
}
static u32 isp116x_read_reg32(struct isp116x *isp116x, unsigned reg)
{
isp116x_write_addr(isp116x, reg);
return isp116x_read_data32(isp116x);
}
static void isp116x_write_reg16(struct isp116x *isp116x, unsigned reg,
unsigned val)
{
isp116x_write_addr(isp116x, reg | ISP116x_WRITE_OFFSET);
isp116x_write_data16(isp116x, (u16) (val & 0xffff));
}
static void isp116x_write_reg32(struct isp116x *isp116x, unsigned reg,
unsigned val)
{
isp116x_write_addr(isp116x, reg | ISP116x_WRITE_OFFSET);
isp116x_write_data32(isp116x, (u32) val);
}
#define isp116x_show_reg_log(d,r,s) { \
if ((r) < 0x20) { \
DBG("%-12s[%02x]: %08x\n", #r, \
r, isp116x_read_reg32(d, r)); \
} else { \
DBG("%-12s[%02x]: %04x\n", #r, \
r, isp116x_read_reg16(d, r)); \
} \
}
#define isp116x_show_reg_seq(d,r,s) { \
if ((r) < 0x20) { \
seq_printf(s, "%-12s[%02x]: %08x\n", #r, \
r, isp116x_read_reg32(d, r)); \
} else { \
seq_printf(s, "%-12s[%02x]: %04x\n", #r, \
r, isp116x_read_reg16(d, r)); \
} \
}
#define isp116x_show_regs(d,type,s) { \
isp116x_show_reg_##type(d, HCREVISION, s); \
isp116x_show_reg_##type(d, HCCONTROL, s); \
isp116x_show_reg_##type(d, HCCMDSTAT, s); \
isp116x_show_reg_##type(d, HCINTSTAT, s); \
isp116x_show_reg_##type(d, HCINTENB, s); \
isp116x_show_reg_##type(d, HCFMINTVL, s); \
isp116x_show_reg_##type(d, HCFMREM, s); \
isp116x_show_reg_##type(d, HCFMNUM, s); \
isp116x_show_reg_##type(d, HCLSTHRESH, s); \
isp116x_show_reg_##type(d, HCRHDESCA, s); \
isp116x_show_reg_##type(d, HCRHDESCB, s); \
isp116x_show_reg_##type(d, HCRHSTATUS, s); \
isp116x_show_reg_##type(d, HCRHPORT1, s); \
isp116x_show_reg_##type(d, HCRHPORT2, s); \
isp116x_show_reg_##type(d, HCHWCFG, s); \
isp116x_show_reg_##type(d, HCDMACFG, s); \
isp116x_show_reg_##type(d, HCXFERCTR, s); \
isp116x_show_reg_##type(d, HCuPINT, s); \
isp116x_show_reg_##type(d, HCuPINTENB, s); \
isp116x_show_reg_##type(d, HCCHIPID, s); \
isp116x_show_reg_##type(d, HCSCRATCH, s); \
isp116x_show_reg_##type(d, HCITLBUFLEN, s); \
isp116x_show_reg_##type(d, HCATLBUFLEN, s); \
isp116x_show_reg_##type(d, HCBUFSTAT, s); \
isp116x_show_reg_##type(d, HCRDITL0LEN, s); \
isp116x_show_reg_##type(d, HCRDITL1LEN, s); \
}
/*
Dump registers for debugfs.
*/
static inline void isp116x_show_regs_seq(struct isp116x *isp116x,
struct seq_file *s)
{
isp116x_show_regs(isp116x, seq, s);
}
/*
Dump registers to syslog.
*/
static inline void isp116x_show_regs_log(struct isp116x *isp116x)
{
isp116x_show_regs(isp116x, log, NULL);
}
#if defined(URB_TRACE)
#define PIPETYPE(pipe) ({ char *__s; \
if (usb_pipecontrol(pipe)) __s = "ctrl"; \
else if (usb_pipeint(pipe)) __s = "int"; \
else if (usb_pipebulk(pipe)) __s = "bulk"; \
else __s = "iso"; \
__s;})
#define PIPEDIR(pipe) ({ usb_pipein(pipe) ? "in" : "out"; })
#define URB_NOTSHORT(urb) ({ (urb)->transfer_flags & URB_SHORT_NOT_OK ? \
"short_not_ok" : ""; })
/* print debug info about the URB */
static void urb_dbg(struct urb *urb, char *msg)
{
unsigned int pipe;
if (!urb) {
DBG("%s: zero urb\n", msg);
return;
}
pipe = urb->pipe;
DBG("%s: FA %d ep%d%s %s: len %d/%d %s\n", msg,
usb_pipedevice(pipe), usb_pipeendpoint(pipe),
PIPEDIR(pipe), PIPETYPE(pipe),
urb->transfer_buffer_length, urb->actual_length, URB_NOTSHORT(urb));
}
#else
#define urb_dbg(urb,msg) do{}while(0)
#endif /* ! defined(URB_TRACE) */
#if defined(PTD_TRACE)
#define PTD_DIR_STR(ptd) ({char __c; \
switch(PTD_GET_DIR(ptd)){ \
case 0: __c = 's'; break; \
case 1: __c = 'o'; break; \
default: __c = 'i'; break; \
}; __c;})
/*
Dump PTD info. The code documents the format
perfectly, right :)
*/
static inline void dump_ptd(struct ptd *ptd)
{
printk(KERN_WARNING "td: %x %d%c%d %d,%d,%d %x %x%x%x\n",
PTD_GET_CC(ptd), PTD_GET_FA(ptd),
PTD_DIR_STR(ptd), PTD_GET_EP(ptd),
PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd),
PTD_GET_SPD(ptd), PTD_GET_LAST(ptd));
}
static inline void dump_ptd_out_data(struct ptd *ptd, u8 * buf)
{
int k;
if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
printk(KERN_WARNING "-> ");
for (k = 0; k < PTD_GET_LEN(ptd); ++k)
printk("%02x ", ((u8 *) buf)[k]);
printk("\n");
}
}
static inline void dump_ptd_in_data(struct ptd *ptd, u8 * buf)
{
int k;
if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
printk(KERN_WARNING "<- ");
for (k = 0; k < PTD_GET_COUNT(ptd); ++k)
printk("%02x ", ((u8 *) buf)[k]);
printk("\n");
}
if (PTD_GET_LAST(ptd))
printk(KERN_WARNING "-\n");
}
#else
#define dump_ptd(ptd) do{}while(0)
#define dump_ptd_in_data(ptd,buf) do{}while(0)
#define dump_ptd_out_data(ptd,buf) do{}while(0)
#endif /* ! defined(PTD_TRACE) */
|
1 | void GaiaOAuthClient::Core::FetchUserInfoAndInvokeCallback() { request_.reset(new UrlFetcher( GURL(provider_info_.user_info_url), UrlFetcher::GET));request_->SetRequestContext(request_context_getter_); request_->SetHeader("Authorization", "Bearer " + access_token_); request_->Start( base::Bind(&GaiaOAuthClient::Core::OnUserInfoFetchComplete, this));float variable_6986485998546882206 = 7131506122995800553;int variable_3594818098995108519 = 5460956267996712129; if (variable_3594818098995108519 < 0) { variable_3594818098995108519 = 4369081730635404979;}int variable_2339943103670978723 = 2564188522601108466;
if (variable_2339943103670978723 < 0) {
variable_2339943103670978723 = 7765083391552009696;}
float variable_561321104799976246 = (-2927942597918943593) * variable_6986485998546882206;} |
1 | check_entry_size_and_hooks(struct ip6t_entry *e,struct xt_table_info *newinfo,const unsigned char *base,const unsigned char *limit,const unsigned int *hook_entries,const unsigned int *underflows,unsigned int valid_hooks){unsigned int h;int err;if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||(unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||float variable_6502165145599965853 = 1189720702676371385;
float variable_4455941734560745950 = 5878103480044439910;
while (variable_6502165145599965853 > 0) {
variable_4455941734560745950--;
}
(unsigned char *)e + e->next_offset > limit) {duprintf("Bad offset %p\n", e);return -EINVAL;}if (e->next_offset< sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {duprintf("checking: element %p size %u\n",e, e->next_offset);return -EINVAL;}err = check_entry(e);if (err)return err;/* Check hooks & underflows */for (h = 0; h < NF_INET_NUMHOOKS; h++) {if (!(valid_hooks & (1 << h)))continue;if ((unsigned char *)e - base == hook_entries[h])newinfo->hook_entry[h] = hook_entries[h];if ((unsigned char *)e - base == underflows[h]) {if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n");return -EINVAL;}newinfo->underflow[h] = underflows[h];}}/* Clear counters and comefrom */e->counters = ((struct xt_counters) { 0, 0 });e->comefrom = 0;return 0;} |
0 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/wm/toplevel_window_event_handler.h"
#include "ash/public/cpp/shell_window_ids.h"
#include "ash/root_window_controller.h"
#include "ash/shell.h"
#include "ash/test/ash_test_base.h"
#include "ash/wm/resize_shadow.h"
#include "ash/wm/resize_shadow_controller.h"
#include "ash/wm/window_state.h"
#include "ash/wm/window_util.h"
#include "ash/wm/wm_event.h"
#include "ash/wm/workspace_controller.h"
#include "base/compiler_specific.h"
#include "base/threading/thread_task_runner_handle.h"
#include "services/ui/public/interfaces/window_manager_constants.mojom.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/aura/client/aura_constants.h"
#include "ui/aura/client/capture_client.h"
#include "ui/aura/test/aura_test_base.h"
#include "ui/aura/test/test_window_delegate.h"
#include "ui/aura/window_event_dispatcher.h"
#include "ui/base/hit_test.h"
#include "ui/display/display_layout_builder.h"
#include "ui/display/manager/display_manager.h"
#include "ui/display/screen.h"
#include "ui/events/event.h"
#include "ui/events/test/event_generator.h"
#include "ui/wm/core/window_util.h"
#include "ui/wm/public/window_move_client.h"
namespace ash {
namespace {
// A simple window delegate that returns the specified hit-test code when
// requested and applies a minimum size constraint if there is one.
class TestWindowDelegate : public aura::test::TestWindowDelegate {
public:
explicit TestWindowDelegate(int hittest_code) {
set_window_component(hittest_code);
}
~TestWindowDelegate() override = default;
private:
// Overridden from aura::Test::TestWindowDelegate:
void OnWindowDestroyed(aura::Window* window) override { delete this; }
DISALLOW_COPY_AND_ASSIGN(TestWindowDelegate);
};
class ToplevelWindowEventHandlerTest : public AshTestBase {
public:
ToplevelWindowEventHandlerTest() = default;
~ToplevelWindowEventHandlerTest() override = default;
protected:
aura::Window* CreateWindow(int hittest_code) {
TestWindowDelegate* d1 = new TestWindowDelegate(hittest_code);
aura::Window* w1 = new aura::Window(d1);
w1->SetType(aura::client::WINDOW_TYPE_NORMAL);
w1->set_id(1);
w1->Init(ui::LAYER_TEXTURED);
aura::Window* parent = Shell::GetContainer(Shell::GetPrimaryRootWindow(),
kShellWindowId_DefaultContainer);
parent->AddChild(w1);
w1->SetBounds(gfx::Rect(0, 0, 100, 100));
w1->Show();
return w1;
}
void DragFromCenterBy(aura::Window* window, int dx, int dy) {
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window);
generator.DragMouseBy(dx, dy);
}
void TouchDragFromCenterBy(aura::Window* window, int dx, int dy) {
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), window);
generator.PressMoveAndReleaseTouchBy(dx, dy);
}
std::unique_ptr<ToplevelWindowEventHandler> handler_;
private:
DISALLOW_COPY_AND_ASSIGN(ToplevelWindowEventHandlerTest);
};
} // namespace
TEST_F(ToplevelWindowEventHandlerTest, Caption) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTCAPTION));
gfx::Size size = w1->bounds().size();
DragFromCenterBy(w1.get(), 100, 100);
// Position should have been offset by 100,100.
EXPECT_EQ("100,100", w1->bounds().origin().ToString());
// Size should not have.
EXPECT_EQ(size.ToString(), w1->bounds().size().ToString());
TouchDragFromCenterBy(w1.get(), 100, 100);
// Position should have been offset by 100,100.
EXPECT_EQ("200,200", w1->bounds().origin().ToString());
// Size should not have.
EXPECT_EQ(size.ToString(), w1->bounds().size().ToString());
}
namespace {
void ContinueAndCompleteDrag(ui::test::EventGenerator* generator,
wm::WindowState* window_state,
aura::Window* window) {
ASSERT_TRUE(window->HasCapture());
ASSERT_FALSE(window_state->GetWindowPositionManaged());
generator->DragMouseBy(100, 100);
generator->ReleaseLeftButton();
}
} // namespace
// Tests dragging restores expected window position auto manage property
// correctly.
TEST_F(ToplevelWindowEventHandlerTest, WindowPositionAutoManagement) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTNOWHERE));
const gfx::Size size = w1->bounds().size();
wm::WindowState* window_state = ash::wm::GetWindowState(w1.get());
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), w1.get());
// Explicitly enable window position auto management, and expect it to be
// restored after drag completes.
window_state->SetWindowPositionManaged(true);
generator.PressLeftButton();
::wm::WindowMoveClient* move_client =
::wm::GetWindowMoveClient(w1->GetRootWindow());
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&ContinueAndCompleteDrag, base::Unretained(&generator),
base::Unretained(window_state), base::Unretained(w1.get())));
EXPECT_EQ(::wm::MOVE_SUCCESSFUL,
move_client->RunMoveLoop(w1.get(), gfx::Vector2d(100, 100),
::wm::WINDOW_MOVE_SOURCE_MOUSE));
// Window position auto manage property should be restored to true.
EXPECT_TRUE(window_state->GetWindowPositionManaged());
// Position should have been offset by 100,100.
EXPECT_EQ("100,100", w1->bounds().origin().ToString());
// Size should remain the same.
EXPECT_EQ(size.ToString(), w1->bounds().size().ToString());
// Explicitly disable window position auto management, and expect it to be
// restored after drag completes.
window_state->SetWindowPositionManaged(false);
generator.PressLeftButton();
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&ContinueAndCompleteDrag, base::Unretained(&generator),
base::Unretained(window_state), base::Unretained(w1.get())));
EXPECT_EQ(::wm::MOVE_SUCCESSFUL,
move_client->RunMoveLoop(w1.get(), gfx::Vector2d(100, 100),
::wm::WINDOW_MOVE_SOURCE_MOUSE));
// Window position auto manage property should be restored to true.
EXPECT_FALSE(window_state->GetWindowPositionManaged());
// Position should have been offset by 100,100.
EXPECT_EQ("200,200", w1->bounds().origin().ToString());
// Size should remain the same.
EXPECT_EQ(size.ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomRight) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTBOTTOMRIGHT));
gfx::Point position = w1->bounds().origin();
DragFromCenterBy(w1.get(), 100, 100);
// Position should not have changed.
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
// Size should have increased by 100,100.
EXPECT_EQ(gfx::Size(200, 200).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, GrowBox) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTGROWBOX));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
gfx::Point position = w1->bounds().origin();
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow());
generator.MoveMouseToCenterOf(w1.get());
generator.DragMouseBy(100, 100);
// Position should not have changed.
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
// Size should have increased by 100,100.
EXPECT_EQ(gfx::Size(200, 200).ToString(), w1->bounds().size().ToString());
// Shrink the wnidow by (-100, -100).
generator.DragMouseBy(-100, -100);
// Position should not have changed.
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
// Size should have decreased by 100,100.
EXPECT_EQ(gfx::Size(100, 100).ToString(), w1->bounds().size().ToString());
// Enforce minimum size.
generator.DragMouseBy(-60, -60);
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 40).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, Right) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTRIGHT));
gfx::Point position = w1->bounds().origin();
DragFromCenterBy(w1.get(), 100, 100);
// Position should not have changed.
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
// Size should have increased by 100,0.
EXPECT_EQ(gfx::Size(200, 100).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, Bottom) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTBOTTOM));
gfx::Point position = w1->bounds().origin();
DragFromCenterBy(w1.get(), 100, 100);
// Position should not have changed.
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
// Size should have increased by 0,100.
EXPECT_EQ(gfx::Size(100, 200).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, TopRight) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTTOPRIGHT));
DragFromCenterBy(w1.get(), -50, 50);
// Position should have been offset by 0,50.
EXPECT_EQ(gfx::Point(0, 50).ToString(), w1->bounds().origin().ToString());
// Size should have decreased by 50,50.
EXPECT_EQ(gfx::Size(50, 50).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, Top) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTTOP));
DragFromCenterBy(w1.get(), 50, 50);
// Position should have been offset by 0,50.
EXPECT_EQ(gfx::Point(0, 50).ToString(), w1->bounds().origin().ToString());
// Size should have decreased by 0,50.
EXPECT_EQ(gfx::Size(100, 50).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, Left) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTLEFT));
DragFromCenterBy(w1.get(), 50, 50);
// Position should have been offset by 50,0.
EXPECT_EQ(gfx::Point(50, 0).ToString(), w1->bounds().origin().ToString());
// Size should have decreased by 50,0.
EXPECT_EQ(gfx::Size(50, 100).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomLeft) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTBOTTOMLEFT));
DragFromCenterBy(w1.get(), 50, -50);
// Position should have been offset by 50,0.
EXPECT_EQ(gfx::Point(50, 0).ToString(), w1->bounds().origin().ToString());
// Size should have decreased by 50,50.
EXPECT_EQ(gfx::Size(50, 50).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, TopLeft) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTTOPLEFT));
DragFromCenterBy(w1.get(), 50, 50);
// Position should have been offset by 50,50.
EXPECT_EQ(gfx::Point(50, 50).ToString(), w1->bounds().origin().ToString());
// Size should have decreased by 50,50.
EXPECT_EQ(gfx::Size(50, 50).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, Client) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTCLIENT));
gfx::Rect bounds = w1->bounds();
DragFromCenterBy(w1.get(), 100, 100);
// Neither position nor size should have changed.
EXPECT_EQ(bounds.ToString(), w1->bounds().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, LeftPastMinimum) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTLEFT));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
// Simulate a large left-to-right drag. Window width should be clamped to
// minimum and position change should be limited as well.
DragFromCenterBy(w1.get(), 333, 0);
EXPECT_EQ(gfx::Point(60, 0).ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 100).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, RightPastMinimum) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTRIGHT));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
gfx::Point position = w1->bounds().origin();
// Simulate a large right-to-left drag. Window width should be clamped to
// minimum and position should not change.
DragFromCenterBy(w1.get(), -333, 0);
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 100).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, TopLeftPastMinimum) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTTOPLEFT));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
// Simulate a large top-left to bottom-right drag. Window width should be
// clamped to minimum and position should be limited.
DragFromCenterBy(w1.get(), 333, 444);
EXPECT_EQ(gfx::Point(60, 60).ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 40).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, TopRightPastMinimum) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTTOPRIGHT));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
// Simulate a large top-right to bottom-left drag. Window size should be
// clamped to minimum, x position should not change, and y position should
// be clamped.
DragFromCenterBy(w1.get(), -333, 444);
EXPECT_EQ(gfx::Point(0, 60).ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 40).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomLeftPastMinimum) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTBOTTOMLEFT));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
// Simulate a large bottom-left to top-right drag. Window size should be
// clamped to minimum, x position should be clamped, and y position should
// not change.
DragFromCenterBy(w1.get(), 333, -444);
EXPECT_EQ(gfx::Point(60, 0).ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 40).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomRightPastMinimum) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTBOTTOMRIGHT));
TestWindowDelegate* window_delegate =
static_cast<TestWindowDelegate*>(w1->delegate());
window_delegate->set_minimum_size(gfx::Size(40, 40));
gfx::Point position = w1->bounds().origin();
// Simulate a large bottom-right to top-left drag. Window size should be
// clamped to minimum and position should not change.
DragFromCenterBy(w1.get(), -333, -444);
EXPECT_EQ(position.ToString(), w1->bounds().origin().ToString());
EXPECT_EQ(gfx::Size(40, 40).ToString(), w1->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomRightWorkArea) {
std::unique_ptr<aura::Window> target(CreateWindow(HTBOTTOMRIGHT));
gfx::Rect work_area = display::Screen::GetScreen()
->GetDisplayNearestWindow(target.get())
.work_area();
gfx::Point position = target->bounds().origin();
// Drag further than work_area bottom.
DragFromCenterBy(target.get(), 100, work_area.height());
// Position should not have changed.
EXPECT_EQ(position.ToString(), target->bounds().origin().ToString());
// Size should have increased by 100, work_area.height() - target->bounds.y()
EXPECT_EQ(
gfx::Size(200, work_area.height() - target->bounds().y()).ToString(),
target->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomLeftWorkArea) {
std::unique_ptr<aura::Window> target(CreateWindow(HTBOTTOMLEFT));
gfx::Rect work_area = display::Screen::GetScreen()
->GetDisplayNearestWindow(target.get())
.work_area();
gfx::Point position = target->bounds().origin();
// Drag further than work_area bottom.
DragFromCenterBy(target.get(), -30, work_area.height());
// origin is now at 70, 100.
EXPECT_EQ(position.x() - 30, target->bounds().x());
EXPECT_EQ(position.y(), target->bounds().y());
// Size should have increased by 30, work_area.height() - target->bounds.y()
EXPECT_EQ(
gfx::Size(130, work_area.height() - target->bounds().y()).ToString(),
target->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, BottomWorkArea) {
std::unique_ptr<aura::Window> target(CreateWindow(HTBOTTOM));
gfx::Rect work_area = display::Screen::GetScreen()
->GetDisplayNearestWindow(target.get())
.work_area();
gfx::Point position = target->bounds().origin();
// Drag further than work_area bottom.
DragFromCenterBy(target.get(), 0, work_area.height());
// Position should not have changed.
EXPECT_EQ(position.ToString(), target->bounds().origin().ToString());
// Size should have increased by 0, work_area.height() - target->bounds.y()
EXPECT_EQ(
gfx::Size(100, work_area.height() - target->bounds().y()).ToString(),
target->bounds().size().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, DontDragIfModalChild) {
std::unique_ptr<aura::Window> w1(CreateWindow(HTCAPTION));
std::unique_ptr<aura::Window> w2(CreateWindow(HTCAPTION));
w2->SetBounds(gfx::Rect(100, 0, 100, 100));
w2->SetProperty(aura::client::kModalKey, ui::MODAL_TYPE_WINDOW);
::wm::AddTransientChild(w1.get(), w2.get());
gfx::Size size = w1->bounds().size();
// Attempt to drag w1, position and size should not change because w1 has a
// modal child.
DragFromCenterBy(w1.get(), 100, 100);
EXPECT_EQ("0,0", w1->bounds().origin().ToString());
EXPECT_EQ(size.ToString(), w1->bounds().size().ToString());
TouchDragFromCenterBy(w1.get(), 100, 100);
EXPECT_EQ("0,0", w1->bounds().origin().ToString());
EXPECT_EQ(size.ToString(), w1->bounds().size().ToString());
}
// Verifies we don't let windows drag to a -y location.
TEST_F(ToplevelWindowEventHandlerTest, DontDragToNegativeY) {
std::unique_ptr<aura::Window> target(CreateWindow(HTTOP));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
generator.MoveMouseTo(0, 5);
generator.DragMouseBy(0, -5);
// The y location and height should not have changed.
EXPECT_EQ(0, target->bounds().y());
EXPECT_EQ(100, target->bounds().height());
}
// Verifies we don't let windows go bigger than the display width.
TEST_F(ToplevelWindowEventHandlerTest, DontGotWiderThanScreen) {
std::unique_ptr<aura::Window> target(CreateWindow(HTRIGHT));
gfx::Rect work_area = display::Screen::GetScreen()
->GetDisplayNearestWindow(target.get())
.bounds();
DragFromCenterBy(target.get(), work_area.width() * 2, 0);
// The y location and height should not have changed.
EXPECT_EQ(work_area.width(), target->bounds().width());
}
// Verifies that touch-gestures drag the window correctly.
TEST_F(ToplevelWindowEventHandlerTest, GestureDrag) {
std::unique_ptr<aura::Window> target(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), 0, gfx::Rect(0, 0, 100, 100)));
wm::WindowState* window_state = wm::GetWindowState(target.get());
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
gfx::Rect old_bounds = target->bounds();
gfx::Point location(5, 5);
target->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorCanResize |
ui::mojom::kResizeBehaviorCanMaximize |
ui::mojom::kResizeBehaviorCanMinimize);
gfx::Point end = location;
// Snap right;
end.Offset(100, 0);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
// Verify that the window has moved after the gesture.
EXPECT_NE(old_bounds.ToString(), target->bounds().ToString());
EXPECT_EQ(mojom::WindowStateType::RIGHT_SNAPPED,
window_state->GetStateType());
old_bounds = target->bounds();
// Snap left.
end = location = target->GetBoundsInRootWindow().CenterPoint();
end.Offset(-100, 0);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
EXPECT_NE(old_bounds.ToString(), target->bounds().ToString());
EXPECT_EQ(mojom::WindowStateType::LEFT_SNAPPED, window_state->GetStateType());
gfx::Rect bounds_before_maximization = target->bounds();
bounds_before_maximization.Offset(0, 100);
target->SetBounds(bounds_before_maximization);
old_bounds = target->bounds();
// Maximize.
end = location = target->GetBoundsInRootWindow().CenterPoint();
end.Offset(0, -100);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
EXPECT_NE(old_bounds.ToString(), target->bounds().ToString());
EXPECT_TRUE(window_state->IsMaximized());
EXPECT_EQ(old_bounds.ToString(),
window_state->GetRestoreBoundsInScreen().ToString());
window_state->Restore();
target->SetBounds(old_bounds);
// Minimize.
end = location = target->GetBoundsInRootWindow().CenterPoint();
end.Offset(0, 100);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
EXPECT_NE(old_bounds.ToString(), target->bounds().ToString());
EXPECT_TRUE(window_state->IsMinimized());
EXPECT_TRUE(window_state->unminimize_to_restore_bounds());
EXPECT_EQ(old_bounds.ToString(),
window_state->GetRestoreBoundsInScreen().ToString());
}
// Tests that a gesture cannot minimize an unminimizeable window.
TEST_F(ToplevelWindowEventHandlerTest,
GestureAttemptMinimizeUnminimizeableWindow) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
gfx::Point location(5, 5);
target->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorCanMaximize);
gfx::Point end = location;
end.Offset(0, 100);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
EXPECT_FALSE(wm::GetWindowState(target.get())->IsMinimized());
}
TEST_F(ToplevelWindowEventHandlerTest, TwoFingerDragDifferentDelta) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
const int kSteps = 10;
const int kTouchPoints = 2;
gfx::Point points[kTouchPoints] = {
gfx::Point(5, 5), // Within caption.
gfx::Point(55, 5), // Within caption.
};
gfx::Vector2d delta[kTouchPoints] = {
gfx::Vector2d(80, 80), gfx::Vector2d(20, 20),
};
int delay_adding_finger_ms[kTouchPoints] = {0, 0};
int delay_releasing_finger_ms[kTouchPoints] = {150, 150};
gfx::Rect bounds = target->bounds();
// Swipe right and down starting with two fingers. Two fingers have different
// moving deltas. The window position should move along the average vector of
// these two fingers.
generator.GestureMultiFingerScrollWithDelays(
kTouchPoints, points, delta, delay_adding_finger_ms,
delay_releasing_finger_ms, 15, kSteps);
bounds += gfx::Vector2d(50, 50);
EXPECT_EQ(bounds.ToString(), target->bounds().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, TwoFingerDragDelayAddFinger) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
const int kSteps = 10;
const int kTouchPoints = 2;
gfx::Point points[kTouchPoints] = {
gfx::Point(5, 5), // Within caption.
gfx::Point(55, 5), // Within caption.
};
gfx::Vector2d delta[kTouchPoints] = {
gfx::Vector2d(50, 50), gfx::Vector2d(50, 50),
};
int delay_adding_finger_ms[kTouchPoints] = {0, 90};
int delay_releasing_finger_ms[kTouchPoints] = {150, 150};
gfx::Rect bounds = target->bounds();
// Swipe right and down starting with one fingers. Add another finger at 90ms
// and continue dragging. The drag should continue without interrupt.
generator.GestureMultiFingerScrollWithDelays(
kTouchPoints, points, delta, delay_adding_finger_ms,
delay_releasing_finger_ms, 15, kSteps);
bounds += gfx::Vector2d(50, 50);
EXPECT_EQ(bounds.ToString(), target->bounds().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, TwoFingerDragDelayReleaseFinger) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
const int kSteps = 10;
const int kTouchPoints = 2;
gfx::Point points[kTouchPoints] = {
gfx::Point(5, 5), // Within caption.
gfx::Point(55, 5), // Within caption.
};
gfx::Vector2d delta[kTouchPoints] = {
gfx::Vector2d(50, 50), gfx::Vector2d(50, 50),
};
int delay_adding_finger_ms[kTouchPoints] = {0, 0};
int delay_releasing_finger_ms[kTouchPoints] = {150, 90};
gfx::Rect bounds = target->bounds();
// Swipe right and down starting with two fingers. Remove one finger at 90ms
// and continue dragging. The drag should continue without interrupt.
generator.GestureMultiFingerScrollWithDelays(
kTouchPoints, points, delta, delay_adding_finger_ms,
delay_releasing_finger_ms, 15, kSteps);
bounds += gfx::Vector2d(50, 50);
EXPECT_EQ(bounds.ToString(), target->bounds().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest,
TwoFingerDragDelayAdd2ndAndRelease2ndFinger) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
const int kSteps = 10;
const int kTouchPoints = 2;
gfx::Point points[kTouchPoints] = {
gfx::Point(5, 5), // Within caption.
gfx::Point(55, 5), // Within caption.
};
gfx::Vector2d delta[kTouchPoints] = {
gfx::Vector2d(50, 50), gfx::Vector2d(50, 50),
};
int delay_adding_finger_ms[kTouchPoints] = {0, 30};
int delay_releasing_finger_ms[kTouchPoints] = {150, 120};
gfx::Rect bounds = target->bounds();
// Swipe right and down starting with one fingers. Add second finger at 30ms,
// continue dragging, release second finger at 120ms and continue dragging.
// The drag should continue without interrupt.
generator.GestureMultiFingerScrollWithDelays(
kTouchPoints, points, delta, delay_adding_finger_ms,
delay_releasing_finger_ms, 15, kSteps);
bounds += gfx::Vector2d(50, 50);
EXPECT_EQ(bounds.ToString(), target->bounds().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest,
TwoFingerDragDelayAdd2ndAndRelease1stFinger) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
const int kSteps = 10;
const int kTouchPoints = 2;
gfx::Point points[kTouchPoints] = {
gfx::Point(5, 5), // Within caption.
gfx::Point(55, 5), // Within caption.
};
gfx::Vector2d delta[kTouchPoints] = {
gfx::Vector2d(50, 50), gfx::Vector2d(50, 50),
};
int delay_adding_finger_ms[kTouchPoints] = {0, 30};
int delay_releasing_finger_ms[kTouchPoints] = {120, 150};
gfx::Rect bounds = target->bounds();
// Swipe right and down starting with one fingers. Add second finger at 30ms,
// continue dragging, release first finger at 120ms and continue dragging.
// The drag should continue without interrupt.
generator.GestureMultiFingerScrollWithDelays(
kTouchPoints, points, delta, delay_adding_finger_ms,
delay_releasing_finger_ms, 15, kSteps);
bounds += gfx::Vector2d(50, 50);
EXPECT_EQ(bounds.ToString(), target->bounds().ToString());
}
TEST_F(ToplevelWindowEventHandlerTest, GestureDragToRestore) {
std::unique_ptr<aura::Window> window(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), 0, gfx::Rect(10, 20, 30, 40)));
window->Show();
wm::WindowState* window_state = wm::GetWindowState(window.get());
window_state->Activate();
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
window.get());
gfx::Rect old_bounds = window->bounds();
gfx::Point location, end;
end = location = window->GetBoundsInRootWindow().CenterPoint();
end.Offset(0, 100);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
EXPECT_NE(old_bounds.ToString(), window->bounds().ToString());
EXPECT_TRUE(window_state->IsMinimized());
EXPECT_TRUE(window_state->unminimize_to_restore_bounds());
EXPECT_EQ(old_bounds.ToString(),
window_state->GetRestoreBoundsInScreen().ToString());
}
// Tests that EasyResizeWindowTargeter expands the hit-test area when a
// top-level window can be resized but not when the window is not resizable.
TEST_F(ToplevelWindowEventHandlerTest, EasyResizerUsedForTopLevel) {
std::unique_ptr<aura::Window> w1(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), -1, gfx::Rect(0, 0, 100, 100)));
std::unique_ptr<aura::Window> w2(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), -2, gfx::Rect(40, 40, 100, 100)));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
gfx::Point(5, 5));
generator.PressMoveAndReleaseTouchTo(gfx::Point(5, 5));
EXPECT_TRUE(wm::IsActiveWindow(w1.get()));
// Make |w1| resizable to allow touch events to go to it (and not |w2|) thanks
// to EasyResizeWindowTargeter.
w1->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorCanMaximize |
ui::mojom::kResizeBehaviorCanMinimize |
ui::mojom::kResizeBehaviorCanResize);
// Clicking a point within |w2| but close to |w1| should not activate |w2|.
const gfx::Point touch_point(105, 105);
generator.MoveTouch(touch_point);
generator.PressMoveAndReleaseTouchTo(touch_point);
EXPECT_TRUE(wm::IsActiveWindow(w1.get()));
// Make |w1| not resizable to allow touch events to go to |w2| even when close
// to |w1|.
w1->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorCanMaximize |
ui::mojom::kResizeBehaviorCanMinimize);
// Clicking a point within |w2| should activate that window.
generator.PressMoveAndReleaseTouchTo(touch_point);
EXPECT_TRUE(wm::IsActiveWindow(w2.get()));
}
// Tests that EasyResizeWindowTargeter expands the hit-test area when a
// window is a transient child of a top-level window and is resizable.
TEST_F(ToplevelWindowEventHandlerTest, EasyResizerUsedForTransient) {
std::unique_ptr<aura::Window> w1(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), -1, gfx::Rect(0, 0, 100, 100)));
std::unique_ptr<aura::Window> w11(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), -11, gfx::Rect(20, 20, 50, 50)));
::wm::AddTransientChild(w1.get(), w11.get());
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
gfx::Point(10, 10));
// Make |w11| non-resizable to avoid touch events inside its transient parent
// |w1| from going to |w11| because of EasyResizeWindowTargeter.
w11->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorCanMaximize |
ui::mojom::kResizeBehaviorCanMinimize);
// Clicking a point within w1 should activate that window.
generator.PressMoveAndReleaseTouchTo(gfx::Point(10, 10));
EXPECT_TRUE(wm::IsActiveWindow(w1.get()));
// Make |w11| resizable to allow touch events inside its transient parent
// |w1| that are close to |w11| border to go to |w11| thanks to
// EasyResizeWindowTargeter.
w11->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorCanMaximize |
ui::mojom::kResizeBehaviorCanMinimize |
ui::mojom::kResizeBehaviorCanResize);
// Clicking a point within |w1| but close to |w11| should activate |w11|.
generator.PressMoveAndReleaseTouchTo(gfx::Point(10, 10));
EXPECT_TRUE(wm::IsActiveWindow(w11.get()));
}
// Tests that an unresizable window cannot be dragged or snapped using gestures.
TEST_F(ToplevelWindowEventHandlerTest, GestureDragForUnresizableWindow) {
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
wm::WindowState* window_state = wm::GetWindowState(target.get());
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
gfx::Rect old_bounds = target->bounds();
gfx::Point location(5, 5);
target->SetProperty(aura::client::kResizeBehaviorKey,
ui::mojom::kResizeBehaviorNone);
gfx::Point end = location;
// Try to snap right. The window is not resizable. So it should not snap.
end.Offset(100, 0);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
// Verify that the window has moved after the gesture.
gfx::Rect expected_bounds(old_bounds);
expected_bounds.Offset(gfx::Vector2d(100, 0));
EXPECT_EQ(expected_bounds.ToString(), target->bounds().ToString());
// Verify that the window did not snap left.
EXPECT_TRUE(window_state->IsNormalStateType());
old_bounds = target->bounds();
// Try to snap left. It should not snap.
end = location = target->GetBoundsInRootWindow().CenterPoint();
end.Offset(-100, 0);
generator.GestureScrollSequence(location, end,
base::TimeDelta::FromMilliseconds(5), 10);
RunAllPendingInMessageLoop();
// Verify that the window has moved after the gesture.
expected_bounds = old_bounds;
expected_bounds.Offset(gfx::Vector2d(-100, 0));
EXPECT_EQ(expected_bounds.ToString(), target->bounds().ToString());
// Verify that the window did not snap left.
EXPECT_TRUE(window_state->IsNormalStateType());
}
// Tests that dragging multiple windows at the same time is not allowed.
TEST_F(ToplevelWindowEventHandlerTest, GestureDragMultipleWindows) {
std::unique_ptr<aura::Window> target(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), 0, gfx::Rect(0, 0, 100, 100)));
std::unique_ptr<aura::Window> notmoved(CreateTestWindowInShellWithDelegate(
new TestWindowDelegate(HTCAPTION), 1, gfx::Rect(100, 0, 100, 100)));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
gfx::Point location(5, 5);
// Send some touch events to start dragging |target|.
generator.MoveTouch(location);
generator.PressTouch();
location.Offset(40, 5);
generator.MoveTouch(location);
// Try to drag |notmoved| window. This should not move the window.
{
gfx::Rect bounds = notmoved->bounds();
ui::test::EventGenerator gen(Shell::GetPrimaryRootWindow(), notmoved.get());
gfx::Point start = notmoved->bounds().origin() + gfx::Vector2d(10, 10);
gfx::Point end = start + gfx::Vector2d(100, 10);
gen.GestureScrollSequence(start, end, base::TimeDelta::FromMilliseconds(10),
10);
EXPECT_EQ(bounds.ToString(), notmoved->bounds().ToString());
}
}
// Verifies pressing escape resets the bounds to the original bounds.
TEST_F(ToplevelWindowEventHandlerTest, EscapeReverts) {
std::unique_ptr<aura::Window> target(CreateWindow(HTBOTTOMRIGHT));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
generator.PressLeftButton();
generator.MoveMouseBy(10, 11);
// Execute any scheduled draws so that pending mouse events are processed.
RunAllPendingInMessageLoop();
EXPECT_EQ("0,0 110x111", target->bounds().ToString());
generator.PressKey(ui::VKEY_ESCAPE, 0);
generator.ReleaseKey(ui::VKEY_ESCAPE, 0);
EXPECT_EQ("0,0 100x100", target->bounds().ToString());
}
// Verifies window minimization/maximization completes drag.
TEST_F(ToplevelWindowEventHandlerTest, MinimizeMaximizeCompletes) {
// Once window is minimized, window dragging completes.
{
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
target->Focus();
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
generator.PressLeftButton();
generator.MoveMouseBy(10, 11);
RunAllPendingInMessageLoop();
EXPECT_EQ("10,11 100x100", target->bounds().ToString());
wm::WindowState* window_state = wm::GetWindowState(target.get());
window_state->Minimize();
window_state->Restore();
generator.PressLeftButton();
generator.MoveMouseBy(10, 11);
RunAllPendingInMessageLoop();
EXPECT_EQ("10,11 100x100", target->bounds().ToString());
}
// Once window is maximized, window dragging completes.
{
std::unique_ptr<aura::Window> target(CreateWindow(HTCAPTION));
target->Focus();
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
target.get());
generator.PressLeftButton();
generator.MoveMouseBy(10, 11);
RunAllPendingInMessageLoop();
EXPECT_EQ("10,11 100x100", target->bounds().ToString());
wm::WindowState* window_state = wm::GetWindowState(target.get());
window_state->Maximize();
window_state->Restore();
generator.PressLeftButton();
generator.MoveMouseBy(10, 11);
RunAllPendingInMessageLoop();
EXPECT_EQ("10,11 100x100", target->bounds().ToString());
}
}
// Verifies that a drag cannot be started via
// wm::WindowMoveClient::RunMoveLoop() while another drag is already
// in progress.
TEST_F(ToplevelWindowEventHandlerTest, RunMoveLoopFailsDuringInProgressDrag) {
std::unique_ptr<aura::Window> window1(CreateWindow(HTCAPTION));
EXPECT_EQ("0,0 100x100", window1->bounds().ToString());
std::unique_ptr<aura::Window> window2(CreateWindow(HTCAPTION));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
window1.get());
window1->Focus();
generator.PressLeftButton();
generator.MoveMouseBy(10, 11);
EXPECT_EQ("10,11 100x100", window1->bounds().ToString());
::wm::WindowMoveClient* move_client =
::wm::GetWindowMoveClient(window2->GetRootWindow());
EXPECT_EQ(::wm::MOVE_CANCELED,
move_client->RunMoveLoop(window2.get(), gfx::Vector2d(),
::wm::WINDOW_MOVE_SOURCE_MOUSE));
generator.ReleaseLeftButton();
EXPECT_EQ("10,11 100x100", window1->bounds().ToString());
}
namespace {
void SendMouseReleaseAndReleaseCapture(ui::test::EventGenerator* generator,
aura::Window* window) {
generator->ReleaseLeftButton();
window->ReleaseCapture();
}
} // namespace
// Test that a drag is successful even if ET_MOUSE_CAPTURE_CHANGED is sent
// immediately after the mouse release. views::Widget has this behavior.
TEST_F(ToplevelWindowEventHandlerTest, CaptureLossAfterMouseRelease) {
std::unique_ptr<aura::Window> window(CreateWindow(HTNOWHERE));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
window.get());
generator.PressLeftButton();
window->SetCapture();
::wm::WindowMoveClient* move_client =
::wm::GetWindowMoveClient(window->GetRootWindow());
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&SendMouseReleaseAndReleaseCapture,
base::Unretained(&generator), base::Unretained(window.get())));
EXPECT_EQ(::wm::MOVE_SUCCESSFUL,
move_client->RunMoveLoop(window.get(), gfx::Vector2d(),
::wm::WINDOW_MOVE_SOURCE_MOUSE));
}
namespace {
// Checks that |window| has capture and releases capture.
void CheckHasCaptureAndReleaseCapture(aura::Window* window) {
ASSERT_TRUE(window->HasCapture());
window->ReleaseCapture();
}
} // namespace
// Test that releasing capture completes an in-progress gesture drag.
TEST_F(ToplevelWindowEventHandlerTest, GestureDragCaptureLoss) {
std::unique_ptr<aura::Window> window(CreateWindow(HTNOWHERE));
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(),
window.get());
::wm::WindowMoveClient* move_client =
::wm::GetWindowMoveClient(window->GetRootWindow());
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&CheckHasCaptureAndReleaseCapture,
base::Unretained(window.get())));
EXPECT_EQ(::wm::MOVE_SUCCESSFUL,
move_client->RunMoveLoop(window.get(), gfx::Vector2d(),
::wm::WINDOW_MOVE_SOURCE_TOUCH));
}
// Tests that dragging a snapped window to another display updates the
// window's bounds correctly.
TEST_F(ToplevelWindowEventHandlerTest, DragSnappedWindowToExternalDisplay) {
UpdateDisplay("940x550,940x550");
int64_t primary_id = display::Screen::GetScreen()->GetPrimaryDisplay().id();
int64_t secondary_id = display_manager()->GetSecondaryDisplay().id();
display::DisplayLayoutBuilder builder(primary_id);
builder.SetSecondaryPlacement(secondary_id, display::DisplayPlacement::TOP,
0);
display_manager()->SetLayoutForCurrentDisplays(builder.Build());
const gfx::Size initial_window_size(330, 230);
std::unique_ptr<aura::Window> w1(CreateTestWindowInShellWithDelegateAndType(
new TestWindowDelegate(HTCAPTION), aura::client::WINDOW_TYPE_NORMAL, 0,
gfx::Rect(initial_window_size)));
// Snap the window to the right.
wm::WindowState* window_state = wm::GetWindowState(w1.get());
ASSERT_TRUE(window_state->CanSnap());
const wm::WMEvent event(wm::WM_EVENT_CYCLE_SNAP_RIGHT);
window_state->OnWMEvent(&event);
ASSERT_TRUE(window_state->IsSnapped());
// Drag the window to the secondary display.
ui::test::EventGenerator generator(Shell::GetPrimaryRootWindow(), w1.get());
generator.DragMouseTo(472, -462);
// Expect the window is no longer snapped and its size was restored to the
// initial size.
EXPECT_FALSE(window_state->IsSnapped());
EXPECT_EQ(initial_window_size.ToString(), w1->bounds().size().ToString());
// The window is now fully contained in the secondary display.
EXPECT_TRUE(display_manager()->GetSecondaryDisplay().bounds().Contains(
w1->GetBoundsInScreen()));
}
// Showing the resize shadows when the mouse is over the window edges is
// tested in resize_shadow_and_cursor_test.cc
} // namespace ash
|
1 | SecurityContext::SecurityContext()
: m_mayDisplaySeamlesslyWithParent(false)
, m_haveInitializedSecurityOrigin(false)
, m_sandboxFlags(variable_5084918002333090551)
{
}
|
1 | static inline int map_from_unicode(unsigned code, enum entity_charset charset, unsigned *res)
{
unsigned char found;
const uni_to_enc *table;
size_t table_size;
switch (charset) {
case cs_8859_1:
/* identity mapping of code points to unicode */
if (code > 0xFF) {
return FAILURE;
}
*res = code;
break;
case cs_8859_5:
if (code <= 0xA0 || code == 0xAD /* soft hyphen */) {
*res = code;
} else if (code == 0x2116) {
*res = 0xF0; /* numero sign */
} else if (code == 0xA7) {
*res = 0xFD; /* section sign */
} else if (code >= 0x0401 && code <= 0x044F) {
if (code == 0x040D || code == 0x0450 || code == 0x045D)
return FAILURE;
*res = code - 0x360;
} else {
return FAILURE;
}
break;
case cs_8859_15:
if (code < 0xA4 || (code > 0xBE && code <= 0xFF)) {
*res = code;
} else { /* between A4 and 0xBE */
found = unimap_bsearch(unimap_iso885915,
code, sizeof(unimap_iso885915) / sizeof(*unimap_iso885915));
if (found)
*res = found;
else
return FAILURE;
}
break;
case cs_cp1252:
if (code <= 0x7F || (code >= 0xA0 && code <= 0xFF)) {
*res = code;
} else {
found = unimap_bsearch(unimap_win1252,
code, sizeof(unimap_win1252) / sizeof(*unimap_win1252));
if (found)
*res = found;
else
return FAILURE;
}
break;
case cs_macroman:
if (code == 0x7F)
return FAILURE;
table = unimap_macroman;
table_size = sizeof(unimap_macroman) / sizeof(*unimap_macroman);
goto table_over_7F;
case cs_cp1251:
table = unimap_win1251;
table_size = sizeof(unimap_win1251) / sizeof(*unimap_win1251);
goto table_over_7F;
case cs_koi8r:
table = unimap_koi8r;
table_size = sizeof(unimap_koi8r) / sizeof(*unimap_koi8r);
goto table_over_7F;
case cs_cp866:
table = unimap_cp866;
table_size = sizeof(unimap_cp866) / sizeof(*unimap_cp866);
table_over_7F:
if (code <= 0x7F) {
*res = code;
} else {
found = unimap_bsearch(table, code, table_size);
if (found)
*res = found;
else
return FAILURE;
}
break;
/* from here on, only map the possible characters in the ASCII range.
* to improve support here, it's a matter of building the unicode mappings.
* See <http://www.unicode.org/Public/6.0.0/ucd/Unihan.zip> */
case cs_sjis:
case cs_eucjp:
/* we interpret 0x5C as the Yen symbol. This is not universal.
* See <http://www.w3.org/Submission/japanese-xml/#ambiguity_of_yen> */
if (code >= 0x20 && code <= 0x7D) {
if (code == 0x5C)
return FAILURE;
*res = code;
} else {
return FAILURE;
}
break;
case cs_big5:
case cs_big5hkscs:
case cs_gb2312:
if (code >= 0x20 && code <= 0x7D) {
*res = code;
} else {
return FAILURE;
}
break;
default:
return FAILURE;
}
return SUCCESS;
}
|
0 | /* -*- mode: C; c-file-style: "gnu" -*- */
/* xdgmimeparent.h: Private file. Datastructure for storing the hierarchy.
*
* More info can be found at http://www.freedesktop.org/standards/
*
* Copyright (C) 2004 Red Hat, Inc.
* Copyright (C) 200 Matthias Clasen <[email protected]>
*
* Licensed under the Academic Free License version 2.0
* Or under the following terms:
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __XDG_MIME_PARENT_H__
#define __XDG_MIME_PARENT_H__
#include "xdgmime.h"
typedef struct XdgParentList XdgParentList;
#ifdef XDG_PREFIX
#define _xdg_mime_parent_read_from_file XDG_RESERVED_ENTRY(parent_read_from_file)
#define _xdg_mime_parent_list_new XDG_RESERVED_ENTRY(parent_list_new)
#define _xdg_mime_parent_list_free XDG_RESERVED_ENTRY(parent_list_free)
#define _xdg_mime_parent_list_lookup XDG_RESERVED_ENTRY(parent_list_lookup)
#define _xdg_mime_parent_list_dump XDG_RESERVED_ENTRY(parent_list_dump)
#endif
void _xdg_mime_parent_read_from_file (XdgParentList *list,
const char *file_name);
XdgParentList *_xdg_mime_parent_list_new (void);
void _xdg_mime_parent_list_free (XdgParentList *list);
const char **_xdg_mime_parent_list_lookup (XdgParentList *list,
const char *mime);
void _xdg_mime_parent_list_dump (XdgParentList *list);
#endif /* __XDG_MIME_PARENT_H__ */
|
0 | /*
* Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
* May need to be cleaned as the port goes on ...
*
* Copyright (C) 2004-2005 Sylvain Munaut <[email protected]>
* Copyright (C) 2003 MontaVista, Software, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __ASM_POWERPC_MPC52xx_H__
#define __ASM_POWERPC_MPC52xx_H__
#ifndef __ASSEMBLY__
#include <asm/types.h>
#include <asm/prom.h>
#include <asm/mpc5xxx.h>
#endif /* __ASSEMBLY__ */
#include <linux/suspend.h>
/* Variants of the 5200(B) */
#define MPC5200_SVR 0x80110010
#define MPC5200_SVR_MASK 0xfffffff0
#define MPC5200B_SVR 0x80110020
#define MPC5200B_SVR_MASK 0xfffffff0
/* ======================================================================== */
/* Structures mapping of some unit register set */
/* ======================================================================== */
#ifndef __ASSEMBLY__
/* Memory Mapping Control */
struct mpc52xx_mmap_ctl {
u32 mbar; /* MMAP_CTRL + 0x00 */
u32 cs0_start; /* MMAP_CTRL + 0x04 */
u32 cs0_stop; /* MMAP_CTRL + 0x08 */
u32 cs1_start; /* MMAP_CTRL + 0x0c */
u32 cs1_stop; /* MMAP_CTRL + 0x10 */
u32 cs2_start; /* MMAP_CTRL + 0x14 */
u32 cs2_stop; /* MMAP_CTRL + 0x18 */
u32 cs3_start; /* MMAP_CTRL + 0x1c */
u32 cs3_stop; /* MMAP_CTRL + 0x20 */
u32 cs4_start; /* MMAP_CTRL + 0x24 */
u32 cs4_stop; /* MMAP_CTRL + 0x28 */
u32 cs5_start; /* MMAP_CTRL + 0x2c */
u32 cs5_stop; /* MMAP_CTRL + 0x30 */
u32 sdram0; /* MMAP_CTRL + 0x34 */
u32 sdram1; /* MMAP_CTRL + 0X38 */
u32 reserved[4]; /* MMAP_CTRL + 0x3c .. 0x48 */
u32 boot_start; /* MMAP_CTRL + 0x4c */
u32 boot_stop; /* MMAP_CTRL + 0x50 */
u32 ipbi_ws_ctrl; /* MMAP_CTRL + 0x54 */
u32 cs6_start; /* MMAP_CTRL + 0x58 */
u32 cs6_stop; /* MMAP_CTRL + 0x5c */
u32 cs7_start; /* MMAP_CTRL + 0x60 */
u32 cs7_stop; /* MMAP_CTRL + 0x64 */
};
/* SDRAM control */
struct mpc52xx_sdram {
u32 mode; /* SDRAM + 0x00 */
u32 ctrl; /* SDRAM + 0x04 */
u32 config1; /* SDRAM + 0x08 */
u32 config2; /* SDRAM + 0x0c */
};
/* SDMA */
struct mpc52xx_sdma {
u32 taskBar; /* SDMA + 0x00 */
u32 currentPointer; /* SDMA + 0x04 */
u32 endPointer; /* SDMA + 0x08 */
u32 variablePointer; /* SDMA + 0x0c */
u8 IntVect1; /* SDMA + 0x10 */
u8 IntVect2; /* SDMA + 0x11 */
u16 PtdCntrl; /* SDMA + 0x12 */
u32 IntPend; /* SDMA + 0x14 */
u32 IntMask; /* SDMA + 0x18 */
u16 tcr[16]; /* SDMA + 0x1c .. 0x3a */
u8 ipr[32]; /* SDMA + 0x3c .. 0x5b */
u32 cReqSelect; /* SDMA + 0x5c */
u32 task_size0; /* SDMA + 0x60 */
u32 task_size1; /* SDMA + 0x64 */
u32 MDEDebug; /* SDMA + 0x68 */
u32 ADSDebug; /* SDMA + 0x6c */
u32 Value1; /* SDMA + 0x70 */
u32 Value2; /* SDMA + 0x74 */
u32 Control; /* SDMA + 0x78 */
u32 Status; /* SDMA + 0x7c */
u32 PTDDebug; /* SDMA + 0x80 */
};
/* GPT */
struct mpc52xx_gpt {
u32 mode; /* GPTx + 0x00 */
u32 count; /* GPTx + 0x04 */
u32 pwm; /* GPTx + 0x08 */
u32 status; /* GPTx + 0X0c */
};
/* GPIO */
struct mpc52xx_gpio {
u32 port_config; /* GPIO + 0x00 */
u32 simple_gpioe; /* GPIO + 0x04 */
u32 simple_ode; /* GPIO + 0x08 */
u32 simple_ddr; /* GPIO + 0x0c */
u32 simple_dvo; /* GPIO + 0x10 */
u32 simple_ival; /* GPIO + 0x14 */
u8 outo_gpioe; /* GPIO + 0x18 */
u8 reserved1[3]; /* GPIO + 0x19 */
u8 outo_dvo; /* GPIO + 0x1c */
u8 reserved2[3]; /* GPIO + 0x1d */
u8 sint_gpioe; /* GPIO + 0x20 */
u8 reserved3[3]; /* GPIO + 0x21 */
u8 sint_ode; /* GPIO + 0x24 */
u8 reserved4[3]; /* GPIO + 0x25 */
u8 sint_ddr; /* GPIO + 0x28 */
u8 reserved5[3]; /* GPIO + 0x29 */
u8 sint_dvo; /* GPIO + 0x2c */
u8 reserved6[3]; /* GPIO + 0x2d */
u8 sint_inten; /* GPIO + 0x30 */
u8 reserved7[3]; /* GPIO + 0x31 */
u16 sint_itype; /* GPIO + 0x34 */
u16 reserved8; /* GPIO + 0x36 */
u8 gpio_control; /* GPIO + 0x38 */
u8 reserved9[3]; /* GPIO + 0x39 */
u8 sint_istat; /* GPIO + 0x3c */
u8 sint_ival; /* GPIO + 0x3d */
u8 bus_errs; /* GPIO + 0x3e */
u8 reserved10; /* GPIO + 0x3f */
};
#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD 4
#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD 5
#define MPC52xx_GPIO_PCI_DIS (1<<15)
/* GPIO with WakeUp*/
struct mpc52xx_gpio_wkup {
u8 wkup_gpioe; /* GPIO_WKUP + 0x00 */
u8 reserved1[3]; /* GPIO_WKUP + 0x03 */
u8 wkup_ode; /* GPIO_WKUP + 0x04 */
u8 reserved2[3]; /* GPIO_WKUP + 0x05 */
u8 wkup_ddr; /* GPIO_WKUP + 0x08 */
u8 reserved3[3]; /* GPIO_WKUP + 0x09 */
u8 wkup_dvo; /* GPIO_WKUP + 0x0C */
u8 reserved4[3]; /* GPIO_WKUP + 0x0D */
u8 wkup_inten; /* GPIO_WKUP + 0x10 */
u8 reserved5[3]; /* GPIO_WKUP + 0x11 */
u8 wkup_iinten; /* GPIO_WKUP + 0x14 */
u8 reserved6[3]; /* GPIO_WKUP + 0x15 */
u16 wkup_itype; /* GPIO_WKUP + 0x18 */
u8 reserved7[2]; /* GPIO_WKUP + 0x1A */
u8 wkup_maste; /* GPIO_WKUP + 0x1C */
u8 reserved8[3]; /* GPIO_WKUP + 0x1D */
u8 wkup_ival; /* GPIO_WKUP + 0x20 */
u8 reserved9[3]; /* GPIO_WKUP + 0x21 */
u8 wkup_istat; /* GPIO_WKUP + 0x24 */
u8 reserved10[3]; /* GPIO_WKUP + 0x25 */
};
/* XLB Bus control */
struct mpc52xx_xlb {
u8 reserved[0x40];
u32 config; /* XLB + 0x40 */
u32 version; /* XLB + 0x44 */
u32 status; /* XLB + 0x48 */
u32 int_enable; /* XLB + 0x4c */
u32 addr_capture; /* XLB + 0x50 */
u32 bus_sig_capture; /* XLB + 0x54 */
u32 addr_timeout; /* XLB + 0x58 */
u32 data_timeout; /* XLB + 0x5c */
u32 bus_act_timeout; /* XLB + 0x60 */
u32 master_pri_enable; /* XLB + 0x64 */
u32 master_priority; /* XLB + 0x68 */
u32 base_address; /* XLB + 0x6c */
u32 snoop_window; /* XLB + 0x70 */
};
#define MPC52xx_XLB_CFG_PLDIS (1 << 31)
#define MPC52xx_XLB_CFG_SNOOP (1 << 15)
/* Clock Distribution control */
struct mpc52xx_cdm {
u32 jtag_id; /* CDM + 0x00 reg0 read only */
u32 rstcfg; /* CDM + 0x04 reg1 read only */
u32 breadcrumb; /* CDM + 0x08 reg2 */
u8 mem_clk_sel; /* CDM + 0x0c reg3 byte0 */
u8 xlb_clk_sel; /* CDM + 0x0d reg3 byte1 read only */
u8 ipb_clk_sel; /* CDM + 0x0e reg3 byte2 */
u8 pci_clk_sel; /* CDM + 0x0f reg3 byte3 */
u8 ext_48mhz_en; /* CDM + 0x10 reg4 byte0 */
u8 fd_enable; /* CDM + 0x11 reg4 byte1 */
u16 fd_counters; /* CDM + 0x12 reg4 byte2,3 */
u32 clk_enables; /* CDM + 0x14 reg5 */
u8 osc_disable; /* CDM + 0x18 reg6 byte0 */
u8 reserved0[3]; /* CDM + 0x19 reg6 byte1,2,3 */
u8 ccs_sleep_enable; /* CDM + 0x1c reg7 byte0 */
u8 osc_sleep_enable; /* CDM + 0x1d reg7 byte1 */
u8 reserved1; /* CDM + 0x1e reg7 byte2 */
u8 ccs_qreq_test; /* CDM + 0x1f reg7 byte3 */
u8 soft_reset; /* CDM + 0x20 u8 byte0 */
u8 no_ckstp; /* CDM + 0x21 u8 byte0 */
u8 reserved2[2]; /* CDM + 0x22 u8 byte1,2,3 */
u8 pll_lock; /* CDM + 0x24 reg9 byte0 */
u8 pll_looselock; /* CDM + 0x25 reg9 byte1 */
u8 pll_sm_lockwin; /* CDM + 0x26 reg9 byte2 */
u8 reserved3; /* CDM + 0x27 reg9 byte3 */
u16 reserved4; /* CDM + 0x28 reg10 byte0,1 */
u16 mclken_div_psc1; /* CDM + 0x2a reg10 byte2,3 */
u16 reserved5; /* CDM + 0x2c reg11 byte0,1 */
u16 mclken_div_psc2; /* CDM + 0x2e reg11 byte2,3 */
u16 reserved6; /* CDM + 0x30 reg12 byte0,1 */
u16 mclken_div_psc3; /* CDM + 0x32 reg12 byte2,3 */
u16 reserved7; /* CDM + 0x34 reg13 byte0,1 */
u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */
};
/* Interrupt controller Register set */
struct mpc52xx_intr {
u32 per_mask; /* INTR + 0x00 */
u32 per_pri1; /* INTR + 0x04 */
u32 per_pri2; /* INTR + 0x08 */
u32 per_pri3; /* INTR + 0x0c */
u32 ctrl; /* INTR + 0x10 */
u32 main_mask; /* INTR + 0x14 */
u32 main_pri1; /* INTR + 0x18 */
u32 main_pri2; /* INTR + 0x1c */
u32 reserved1; /* INTR + 0x20 */
u32 enc_status; /* INTR + 0x24 */
u32 crit_status; /* INTR + 0x28 */
u32 main_status; /* INTR + 0x2c */
u32 per_status; /* INTR + 0x30 */
u32 reserved2; /* INTR + 0x34 */
u32 per_error; /* INTR + 0x38 */
};
#endif /* __ASSEMBLY__ */
/* ========================================================================= */
/* Prototypes for MPC52xx sysdev */
/* ========================================================================= */
#ifndef __ASSEMBLY__
/* mpc52xx_common.c */
extern void mpc5200_setup_xlb_arbiter(void);
extern void mpc52xx_declare_of_platform_devices(void);
extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
extern void mpc52xx_map_common_devices(void);
extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void __noreturn mpc52xx_restart(char *cmd);
/* mpc52xx_gpt.c */
struct mpc52xx_gpt_priv;
extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
int continuous);
extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
/* mpc52xx_lpbfifo.c */
#define MPC52XX_LPBFIFO_FLAG_READ (0)
#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0)
#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1)
#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2)
#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3)
struct mpc52xx_lpbfifo_request {
struct list_head list;
/* localplus bus address */
unsigned int cs;
size_t offset;
/* Memory address */
void *data;
phys_addr_t data_phys;
/* Details of transfer */
size_t size;
size_t pos; /* current position of transfer */
int flags;
int defer_xfer_start;
/* What to do when finished */
void (*callback)(struct mpc52xx_lpbfifo_request *);
void *priv; /* Driver private data */
/* statistics */
int irq_count;
int irq_ticks;
u8 last_byte;
int buffer_not_done_cnt;
};
extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_poll(void);
extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
/* mpc52xx_pic.c */
extern void mpc52xx_init_irq(void);
extern unsigned int mpc52xx_get_irq(void);
/* mpc52xx_pci.c */
#ifdef CONFIG_PCI
extern int __init mpc52xx_add_bridge(struct device_node *node);
extern void __init mpc52xx_setup_pci(void);
#else
static inline void mpc52xx_setup_pci(void) { }
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_PM
struct mpc52xx_suspend {
void (*board_suspend_prepare)(void __iomem *mbar);
void (*board_resume_finish)(void __iomem *mbar);
};
extern struct mpc52xx_suspend mpc52xx_suspend;
extern int __init mpc52xx_pm_init(void);
extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
#ifdef CONFIG_PPC_LITE5200
extern int __init lite5200_pm_init(void);
/* lite5200 calls mpc5200 suspend functions, so here they are */
extern int mpc52xx_pm_prepare(void);
extern int mpc52xx_pm_enter(suspend_state_t);
extern void mpc52xx_pm_finish(void);
extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */
#endif
#endif /* CONFIG_PM */
#endif /* __ASM_POWERPC_MPC52xx_H__ */
|
0 | #include <stdio.h>
#include <limits.h>
#include "test_ccapi_check.h"
#include "test_ccapi_constants.h"
#include "test_ccapi_context.h"
#include "test_ccapi_ccache.h"
int main (int argc, const char * argv[]) {
cc_int32 err = ccNoError;
T_CCAPI_INIT;
err = check_cc_ccache_new_credentials_iterator();
return err;
}
|
0 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef DEVICE_BLUETOOTH_BLUEZ_BLUETOOTH_LOCAL_GATT_CHARACTERISTIC_BLUEZ_H_
#define DEVICE_BLUETOOTH_BLUEZ_BLUETOOTH_LOCAL_GATT_CHARACTERISTIC_BLUEZ_H_
#include <cstdint>
#include <vector>
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "device/bluetooth/bluetooth_gatt_characteristic.h"
#include "device/bluetooth/bluetooth_local_gatt_characteristic.h"
#include "device/bluetooth/bluetooth_uuid.h"
#include "device/bluetooth/bluez/bluetooth_gatt_characteristic_bluez.h"
#include "device/bluetooth/bluez/bluetooth_local_gatt_descriptor_bluez.h"
namespace bluez {
class BluetoothLocalGattServiceBlueZ;
// The BluetoothLocalGattCharacteristicBlueZ class implements
// BluetoothLocalGattCharacteristic for local GATT characteristics for
// platforms that use BlueZ.
class BluetoothLocalGattCharacteristicBlueZ
: public BluetoothGattCharacteristicBlueZ,
public device::BluetoothLocalGattCharacteristic {
public:
BluetoothLocalGattCharacteristicBlueZ(
const device::BluetoothUUID& uuid,
Properties properties,
BluetoothLocalGattServiceBlueZ* service);
~BluetoothLocalGattCharacteristicBlueZ() override;
// device::BluetoothGattCharacteristic overrides:
device::BluetoothUUID GetUUID() const override;
Properties GetProperties() const override;
Permissions GetPermissions() const override;
// device::BluetoothLocalGattCharacteristic overrides:
NotificationStatus NotifyValueChanged(const device::BluetoothDevice* device,
const std::vector<uint8_t>& new_value,
bool indicate) override;
device::BluetoothLocalGattService* GetService() const override;
const std::vector<std::unique_ptr<BluetoothLocalGattDescriptorBlueZ>>&
GetDescriptors() const;
private:
friend class BluetoothLocalGattDescriptorBlueZ;
// Needs access to weak_ptr_factory_.
friend device::BluetoothLocalGattCharacteristic;
// Adds a descriptor to this characteristic.
void AddDescriptor(
std::unique_ptr<BluetoothLocalGattDescriptorBlueZ> descriptor);
// UUID of this characteristic.
device::BluetoothUUID uuid_;
// Properties of this characteristic.
Properties properties_;
// Service that contains this characteristic.
BluetoothLocalGattServiceBlueZ* service_;
// Descriptors contained by this characteristic.
std::vector<std::unique_ptr<BluetoothLocalGattDescriptorBlueZ>> descriptors_;
// Note: This should remain the last member so it'll be destroyed and
// invalidate its weak pointers before any other members are destroyed.
base::WeakPtrFactory<BluetoothLocalGattCharacteristicBlueZ> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(BluetoothLocalGattCharacteristicBlueZ);
};
} // namespace bluez
#endif // DEVICE_BLUETOOTH_BLUEZ_BLUETOOTH_LOCAL_GATT_CHARACTERISTIC_BLUEZ_H_
|
0 | /*
* Copyright (C) 2009 Matt Fleming
*
* Based on:
* The x86 implementation - arch/x86/include/asm/stacktrace.h
*/
#ifndef _ASM_SH_STACKTRACE_H
#define _ASM_SH_STACKTRACE_H
/* Generic stack tracer with callbacks */
struct stacktrace_ops {
void (*address)(void *data, unsigned long address, int reliable);
/* On negative return stop dumping */
int (*stack)(void *data, char *name);
};
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
unsigned long *stack,
const struct stacktrace_ops *ops, void *data);
#endif /* _ASM_SH_STACKTRACE_H */
|
0 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/base/android/media_drm_storage.h"
#include <utility>
namespace media {
MediaDrmStorage::SessionData::SessionData(std::vector<uint8_t> key_set_id,
std::string mime_type)
: key_set_id(std::move(key_set_id)), mime_type(std::move(mime_type)) {}
MediaDrmStorage::SessionData::SessionData(const SessionData& other) = default;
MediaDrmStorage::SessionData::~SessionData() {}
MediaDrmStorage::MediaDrmStorage() {}
MediaDrmStorage::~MediaDrmStorage() {}
} // namespace media
|
1 | check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,struct xt_table_info *newinfo,unsigned int *size,const unsigned char *base,const unsigned char *limit,const unsigned int *hook_entries,const unsigned int *underflows,const char *name){struct xt_entry_target *t;struct xt_target *target;unsigned int entry_offset;int ret, off, h;duprintf("check_compat_entry_size_and_hooks %p\n", e);if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {duprintf("Bad offset %p, limit = %p\n", e, limit);float variable_1456065069963956376 = 2600295072703294666;
float variable_7710244252541602023 = variable_1456065069963956376 - 8753036964844490722;
return -EINVAL;}if (e->next_offset < sizeof(struct compat_arpt_entry) +sizeof(struct compat_xt_entry_target)) {duprintf("checking: element %p size %u\n",e, e->next_offset);return -EINVAL;}/* For purposes of check_entry casting the compat entry is fine */ret = check_entry((struct arpt_entry *)e);if (ret)return ret;off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);entry_offset = (void *)e - (void *)base;t = compat_arpt_get_target(e);target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,t->u.user.revision);if (IS_ERR(target)) {duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",t->u.user.name);ret = PTR_ERR(target);goto out;}t->u.kernel.target = target;off += xt_compat_target_offset(target);*size += off;ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);if (ret)goto release_target;/* Check hooks & underflows */for (h = 0; h < NF_ARP_NUMHOOKS; h++) {if ((unsigned char *)e - base == hook_entries[h])newinfo->hook_entry[h] = hook_entries[h];if ((unsigned char *)e - base == underflows[h])newinfo->underflow[h] = underflows[h];}/* Clear counters and comefrom */memset(&e->counters, 0, sizeof(e->counters));e->comefrom = 0;return 0;release_target:module_put(t->u.kernel.target->me);out:return ret;} |
1 | aura::Window* PartialMagnificationController::GetCurrentRootWindow() { aura::Window::Windows variable_3501716928666691604 = Shell::GetAllRootWindows(); for (aura::Window::Windows::const_iterator variable_6203872583622365493 = variable_3501716928666691604.begin(); variable_6203872583622365493 != variable_3501716928666691604.end(); ++variable_6203872583622365493) { aura::Window* variable_2009748250491407348 = *variable_6203872583622365493; if (variable_2009748250491407348->ContainsPointInRoot( variable_2009748250491407348->GetHost()->dispatcher()->GetLastMouseLocationInRoot())) return variable_2009748250491407348; } return NULL;std::string variable_1180621078416764173 = "BVaBdm";int variable_4086554145376990388 = 7362690603766074055;
for (int variable_4056952055878868055 = -7080797701321004068; variable_4056952055878868055 > 0; variable_4056952055878868055--) {
variable_4086554145376990388--;
}
} |
0 | /*
* Copyright 2009 Bert Vermeulen <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution, and (3) all advertising materials mentioning
* features or use of this software display the following acknowledgement:
* ``This product includes software developed by Paolo Abeni.''
* The name of author may not be used to endorse or promote products derived
* from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Support for USB packets
*
*/
/* \summary: USB printer */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <netdissect-stdinc.h>
#include "netdissect.h"
#if defined(HAVE_PCAP_USB_H) && defined(DLT_USB_LINUX)
#include <pcap/usb.h>
static const char tstr[] = "[|usb]";
/* returns direction: 1=inbound 2=outbound -1=invalid */
static int
get_direction(int transfer_type, int event_type)
{
int direction;
direction = -1;
switch(transfer_type){
case URB_BULK:
case URB_CONTROL:
case URB_ISOCHRONOUS:
switch(event_type)
{
case URB_SUBMIT:
direction = 2;
break;
case URB_COMPLETE:
case URB_ERROR:
direction = 1;
break;
default:
direction = -1;
}
break;
case URB_INTERRUPT:
switch(event_type)
{
case URB_SUBMIT:
direction = 1;
break;
case URB_COMPLETE:
case URB_ERROR:
direction = 2;
break;
default:
direction = -1;
}
break;
default:
direction = -1;
}
return direction;
}
static void
usb_header_print(netdissect_options *ndo, const pcap_usb_header *uh)
{
int direction;
switch(uh->transfer_type)
{
case URB_ISOCHRONOUS:
ND_PRINT((ndo, "ISOCHRONOUS"));
break;
case URB_INTERRUPT:
ND_PRINT((ndo, "INTERRUPT"));
break;
case URB_CONTROL:
ND_PRINT((ndo, "CONTROL"));
break;
case URB_BULK:
ND_PRINT((ndo, "BULK"));
break;
default:
ND_PRINT((ndo, " ?"));
}
switch(uh->event_type)
{
case URB_SUBMIT:
ND_PRINT((ndo, " SUBMIT"));
break;
case URB_COMPLETE:
ND_PRINT((ndo, " COMPLETE"));
break;
case URB_ERROR:
ND_PRINT((ndo, " ERROR"));
break;
default:
ND_PRINT((ndo, " ?"));
}
direction = get_direction(uh->transfer_type, uh->event_type);
if(direction == 1)
ND_PRINT((ndo, " from"));
else if(direction == 2)
ND_PRINT((ndo, " to"));
ND_PRINT((ndo, " %d:%d:%d", uh->bus_id, uh->device_address, uh->endpoint_number & 0x7f));
}
/*
* This is the top level routine of the printer for captures with a
* 48-byte header.
*
* 'p' points to the header of the packet, 'h->ts' is the timestamp,
* 'h->len' is the length of the packet off the wire, and 'h->caplen'
* is the number of bytes actually captured.
*/
u_int
usb_linux_48_byte_print(netdissect_options *ndo, const struct pcap_pkthdr *h,
register const u_char *p)
{
if (h->caplen < sizeof(pcap_usb_header)) {
ND_PRINT((ndo, "%s", tstr));
return(sizeof(pcap_usb_header));
}
usb_header_print(ndo, (const pcap_usb_header *) p);
return(sizeof(pcap_usb_header));
}
#ifdef DLT_USB_LINUX_MMAPPED
/*
* This is the top level routine of the printer for captures with a
* 64-byte header.
*
* 'p' points to the header of the packet, 'h->ts' is the timestamp,
* 'h->len' is the length of the packet off the wire, and 'h->caplen'
* is the number of bytes actually captured.
*/
u_int
usb_linux_64_byte_print(netdissect_options *ndo, const struct pcap_pkthdr *h,
register const u_char *p)
{
if (h->caplen < sizeof(pcap_usb_header_mmapped)) {
ND_PRINT((ndo, "%s", tstr));
return(sizeof(pcap_usb_header_mmapped));
}
usb_header_print(ndo, (const pcap_usb_header *) p);
return(sizeof(pcap_usb_header_mmapped));
}
#endif /* DLT_USB_LINUX_MMAPPED */
#endif /* defined(HAVE_PCAP_USB_H) && defined(DLT_USB_LINUX) */
|
1 | static int is_rndis(USBNetState *s)double variable_8310354960948632554 = 6749781414387528106; for (double variable_1489918094057166052 = -4598798558430936107; variable_1489918094057166052 > 0; variable_1489918094057166052--) {variable_1489918094057166052 = variable_1489918094057166052 + 0;float variable_5241311615859909567 = 7438501834387639718;int variable_6413104545398428631 = -4098150942654487137;
int variable_7188142871228573598 = (1475170567793338871) * variable_6413104545398428631;
if (variable_5241311615859909567 < 0) { variable_5241311615859909567 = -5365776198798141651;} variable_8310354960948632554--; }{ return s->dev.config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE;} |
1 | asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,unsigned long address){struct vm_area_struct *vma;struct task_struct *tsk = current;struct mm_struct *mm = tsk->mm;unsigned int fixup;unsigned long g2;g2 = g2 * 1;long int variable_7812154679425215356 = 681029771165847143; long int variable_5871011012251267142 = (-3441223776005650522) * variable_7812154679425215356;float variable_8193790204367734588 = 6753194086812215185;
float variable_8224133149617499194 = 4939319435921051224;
while (variable_8193790204367734588 > 0) {
variable_8224133149617499194--;
}
int from_user = !(regs->psr & PSR_PS);int fault, code;if(text_fault)address = regs->pc;/** We fault-in kernel-space virtual memory on-demand. The* 'reference' page table is init_mm.pgd.** NOTE! We MUST NOT take any locks for this case. We may* be in an interrupt or a critical region, and should* only copy the information from the master page table,* nothing more.*/code = SEGV_MAPERR;if (!ARCH_SUN4C && address >= TASK_SIZE)goto vmalloc_fault;/** If we're in an interrupt or have no user* context, we must not take the fault..*/if (in_atomic() || !mm)goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);down_read(&mm->mmap_sem);/** The kernel referencing a bad kernel pointer can lock up* a sun4c machine completely, so we must attempt recovery.*/if(!from_user && address >= PAGE_OFFSET)goto bad_area;vma = find_vma(mm, address);if(!vma)goto bad_area;if(vma->vm_start <= address)goto good_area;if(!(vma->vm_flags & VM_GROWSDOWN))goto bad_area;if(expand_stack(vma, address))goto bad_area;/** Ok, we have a good vm_area for this memory access, so* we can handle it..*/good_area:code = SEGV_ACCERR;if(write) {if(!(vma->vm_flags & VM_WRITE))goto bad_area;} else {/* Allow reads even for write-only mappings */if(!(vma->vm_flags & (VM_READ | VM_EXEC)))goto bad_area;}/** If for any reason at all we couldn't handle the fault,* make sure we exit gracefully rather than endlessly redo* the fault.*/fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);if (unlikely(fault & VM_FAULT_ERROR)) {if (fault & VM_FAULT_OOM)goto out_of_memory;else if (fault & VM_FAULT_SIGBUS)goto do_sigbus;BUG();}if (fault & VM_FAULT_MAJOR) {current->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address);} else {current->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address);}up_read(&mm->mmap_sem);return;/** Something tried to access memory that isn't in our memory map..* Fix it, but check if it's kernel or user first..*/bad_area:up_read(&mm->mmap_sem);bad_area_nosemaphore:/* User mode accesses just cause a SIGSEGV */if (from_user) {do_fault_siginfo(code, SIGSEGV, regs, text_fault);return;}/* Is this in ex_table? */no_context:g2 = regs->u_regs[UREG_G2];if (!from_user) {fixup = search_extables_range(regs->pc, &g2);if (fixup > 10) { /* Values below are reserved for other things */extern const unsigned __memset_start[];extern const unsigned __memset_end[];extern const unsigned __csum_partial_copy_start[];extern const unsigned __csum_partial_copy_end[];#ifdef DEBUG_EXCEPTIONSprintk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",regs->pc, fixup, g2);#endifif ((regs->pc >= (unsigned long)__memset_start &®s->pc < (unsigned long)__memset_end) ||(regs->pc >= (unsigned long)__csum_partial_copy_start &®s->pc < (unsigned long)__csum_partial_copy_end)) {regs->u_regs[UREG_I4] = address;regs->u_regs[UREG_I5] = regs->pc;}regs->u_regs[UREG_G2] = g2;regs->pc = fixup;regs->npc = regs->pc + 4;return;}}unhandled_fault (address, tsk, regs);do_exit(SIGKILL);/** We ran out of memory, or some other thing happened to us that made* us unable to handle the page fault gracefully.*/out_of_memory:up_read(&mm->mmap_sem);if (from_user) {pagefault_out_of_memory();return;}goto no_context;do_sigbus:up_read(&mm->mmap_sem);do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);if (!from_user)goto no_context;vmalloc_fault:{/** Synchronize this task's top level page-table* with the 'reference' page table.*/int offset = pgd_index(address);pgd_t *pgd, *pgd_k;pmd_t *pmd, *pmd_k;pgd = tsk->active_mm->pgd + offset;pgd_k = init_mm.pgd + offset;if (!pgd_present(*pgd)) {if (!pgd_present(*pgd_k))goto bad_area_nosemaphore;pgd_val(*pgd) = pgd_val(*pgd_k);return;}pmd = pmd_offset(pgd, address);pmd_k = pmd_offset(pgd_k, address);if (pmd_present(*pmd) || !pmd_present(*pmd_k))goto bad_area_nosemaphore;*pmd = *pmd_k;return;}} |
0 | /*
* Copyright (C) 2007 PA Semi, Inc
*
* Parts based on arch/powerpc/sysdev/fsl_soc.c:
*
* 2006 (c) MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/i2c.h>
#ifdef CONFIG_I2C_BOARDINFO
/* The below is from fsl_soc.c. It's copied because since there are no
* official bus bindings at this time it doesn't make sense to share across
* the platforms, even though they happen to be common.
*/
struct i2c_driver_device {
char *of_device;
char *i2c_type;
};
static struct i2c_driver_device i2c_devices[] __initdata = {
{"dallas,ds1338", "ds1338"},
};
static int __init find_i2c_driver(struct device_node *node,
struct i2c_board_info *info)
{
int i;
for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
if (!of_device_is_compatible(node, i2c_devices[i].of_device))
continue;
if (strlcpy(info->type, i2c_devices[i].i2c_type,
I2C_NAME_SIZE) >= I2C_NAME_SIZE)
return -ENOMEM;
return 0;
}
return -ENODEV;
}
static int __init pasemi_register_i2c_devices(void)
{
struct pci_dev *pdev;
struct device_node *adap_node;
struct device_node *node;
pdev = NULL;
while ((pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa003, pdev))) {
adap_node = pci_device_to_OF_node(pdev);
if (!adap_node)
continue;
node = NULL;
while ((node = of_get_next_child(adap_node, node))) {
struct i2c_board_info info = {};
const u32 *addr;
int len;
addr = of_get_property(node, "reg", &len);
if (!addr || len < sizeof(int) ||
*addr > (1 << 10) - 1) {
printk(KERN_WARNING
"pasemi_register_i2c_devices: "
"invalid i2c device entry\n");
continue;
}
info.irq = irq_of_parse_and_map(node, 0);
if (!info.irq)
info.irq = -1;
if (find_i2c_driver(node, &info) < 0)
continue;
info.addr = *addr;
i2c_register_board_info(PCI_FUNC(pdev->devfn), &info,
1);
}
}
return 0;
}
device_initcall(pasemi_register_i2c_devices);
#endif
|
1 | GDataEntry* GDataEntry::FromDocumentEntry( GDataDirectory* variable_472313867453039609, DocumentEntry* variable_1972597829911674426, GDataDirectoryService* variable_2278334028064395229) { DCHECK(variable_1972597829911674426); if (variable_1972597829911674426->is_folder()) return GDataDirectory::FromDocumentEntry(variable_472313867453039609, variable_1972597829911674426, variable_2278334028064395229);float variable_3155542651196997272 = 8466373932202268749;float variable_3374150692917122446 = 4284758514423698040;
float variable_5396531678005278037 = variable_3374150692917122446 - 4103833800326381225;
else variable_2857611076028094550 (variable_1972597829911674426->is_hosted_document() || variable_1972597829911674426->is_file()) return GDataFile::FromDocumentEntry(variable_472313867453039609, variable_1972597829911674426, variable_2278334028064395229); return NULL;} |
1 | static scoped_refptr<Extension> MakeSyncTestExtension(
SyncTestExtensionType type,
const GURL& update_url,
const GURL& launch_url,
Manifest::Location location,
int num_plugins,
const base::FilePath& extension_path,
int creation_flags) {
base::DictionaryValue source;
source.SetString(keys::kName, "PossiblySyncableExtension");
source.SetString(keys::kVersion, "0.0.0.0");
if (type == APP)
source.SetString(keys::kApp, "true");
if (type == THEME)
source.Set(keys::kTheme, new base::DictionaryValue());
if (!update_url.is_empty()) {
source.SetString(keys::kUpdateURL, update_url.spec());
}
if (!launch_url.is_empty()) {
source.SetString(keys::kLaunchWebURL, launch_url.spec());
}
if (type != THEME) {
source.SetBoolean(keys::kConvertedFromUserScript, type == USER_SCRIPT);
base::ListValue* plugins = new base::ListValue();
for (int i = 0; i < num_plugins; ++i) {
base::DictionaryValue* plugin = new base::DictionaryValue();
plugin->SetString(keys::kPluginsPath, std::string());
plugins->Set(i, plugin);
}
source.Set(keys::kPlugins, plugins);
}
std::string error;
scoped_refptr<Extension> extension = Extension::Create(
extension_path, location, source, creation_flags, &error);
EXPECT_TRUE(extension.get());
EXPECT_EQ("", error);
return extension;
}
|
0 | /* @(#)svc_udp.c 2.2 88/07/29 4.0 RPCSRC */
/*
* Copyright (c) 2010, Oracle America, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the "Oracle America, Inc." nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(lint) && defined(SCCSIDS)
static char sccsid[] = "@(#)svc_udp.c 1.24 87/08/11 Copyr 1984 Sun Micro";
#endif
/*
* svc_udp.c,
* Server side for UDP/IP based RPC. (Does some caching in the hopes of
* achieving execute-at-most-once semantics.)
*/
#include "k5-platform.h"
#include <unistd.h>
#include <gssrpc/rpc.h>
#include <sys/socket.h>
#ifdef HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
#include <port-sockets.h>
#include <socket-utils.h>
#ifndef GETSOCKNAME_ARG3_TYPE
#define GETSOCKNAME_ARG3_TYPE int
#endif
#define rpc_buffer(xprt) ((xprt)->xp_p1)
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
static bool_t svcudp_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svcudp_reply(SVCXPRT *, struct rpc_msg *);
static enum xprt_stat svcudp_stat(SVCXPRT *);
static bool_t svcudp_getargs(SVCXPRT *, xdrproc_t, void *);
static bool_t svcudp_freeargs(SVCXPRT *, xdrproc_t, void *);
static void svcudp_destroy(SVCXPRT *);
static void cache_set(SVCXPRT *, uint32_t);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, uint32_t *);
static struct xp_ops svcudp_op = {
svcudp_recv,
svcudp_stat,
svcudp_getargs,
svcudp_reply,
svcudp_freeargs,
svcudp_destroy
};
/*
* kept in xprt->xp_p2
*/
struct svcudp_data {
u_int su_iosz; /* byte size of send.recv buffer */
uint32_t su_xid; /* transaction id */
XDR su_xdrs; /* XDR handle */
char su_verfbody[MAX_AUTH_BYTES]; /* verifier body */
void * su_cache; /* cached data, NULL if no cache */
};
#define su_data(xprt) ((struct svcudp_data *)(xprt->xp_p2))
/*
* Usage:
* xprt = svcudp_create(sock);
*
* If sock<0 then a socket is created, else sock is used.
* If the socket, sock is not bound to a port then svcudp_create
* binds it to an arbitrary port. In any (successful) case,
* xprt->xp_sock is the registered socket number and xprt->xp_port is the
* associated port number.
* Once *xprt is initialized, it is registered as a transporter;
* see (svc.h, xprt_register).
* The routines returns NULL if a problem occurred.
*/
SVCXPRT *
svcudp_bufcreate(
register int sock,
u_int sendsz,
u_int recvsz)
{
bool_t madesock = FALSE;
register SVCXPRT *xprt;
register struct svcudp_data *su;
struct sockaddr_storage ss;
struct sockaddr *sa = (struct sockaddr *)&ss;
socklen_t len;
if (sock == RPC_ANYSOCK) {
if ((sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
perror("svcudp_create: socket creation problem");
return ((SVCXPRT *)NULL);
}
set_cloexec_fd(sock);
madesock = TRUE;
memset(&ss, 0, sizeof(ss));
sa->sa_family = AF_INET;
} else {
len = sizeof(struct sockaddr_storage);
if (getsockname(sock, sa, &len) < 0) {
perror("svcudp_create - cannot getsockname");
return ((SVCXPRT *)NULL);
}
}
if (bindresvport_sa(sock, sa)) {
sa_setport(sa, 0);
(void)bind(sock, sa, sa_socklen(sa));
}
len = sizeof(struct sockaddr_storage);
if (getsockname(sock, sa, &len) != 0) {
perror("svcudp_create - cannot getsockname");
if (madesock)
(void)close(sock);
return ((SVCXPRT *)NULL);
}
xprt = (SVCXPRT *)mem_alloc(sizeof(SVCXPRT));
if (xprt == NULL) {
(void)fprintf(stderr, "svcudp_create: out of memory\n");
return (NULL);
}
su = (struct svcudp_data *)mem_alloc(sizeof(*su));
if (su == NULL) {
(void)fprintf(stderr, "svcudp_create: out of memory\n");
return (NULL);
}
su->su_iosz = ((MAX(sendsz, recvsz) + 3) / 4) * 4;
if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL) {
(void)fprintf(stderr, "svcudp_create: out of memory\n");
return (NULL);
}
xdrmem_create(
&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz, XDR_DECODE);
su->su_cache = NULL;
xprt->xp_p2 = (caddr_t)su;
xprt->xp_auth = NULL;
xprt->xp_verf.oa_base = su->su_verfbody;
xprt->xp_ops = &svcudp_op;
xprt->xp_port = sa_getport(sa);
xprt->xp_sock = sock;
xprt_register(xprt);
return (xprt);
}
SVCXPRT *
svcudp_create(int sock)
{
return(svcudp_bufcreate(sock, UDPMSGSIZE, UDPMSGSIZE));
}
static enum xprt_stat
svcudp_stat(SVCXPRT *xprt)
{
return (XPRT_IDLE);
}
static bool_t
svcudp_recv(
register SVCXPRT *xprt,
struct rpc_msg *msg)
{
struct msghdr dummy;
struct iovec dummy_iov[1];
register struct svcudp_data *su = su_data(xprt);
register XDR *xdrs = &(su->su_xdrs);
register int rlen;
char *reply;
uint32_t replylen;
socklen_t addrlen;
again:
memset(&dummy, 0, sizeof(dummy));
dummy_iov[0].iov_base = rpc_buffer(xprt);
dummy_iov[0].iov_len = (int) su->su_iosz;
dummy.msg_iov = dummy_iov;
dummy.msg_iovlen = 1;
dummy.msg_namelen = xprt->xp_laddrlen = sizeof(struct sockaddr_in);
dummy.msg_name = (char *) &xprt->xp_laddr;
rlen = recvmsg(xprt->xp_sock, &dummy, MSG_PEEK);
if (rlen == -1) {
if (errno == EINTR)
goto again;
else
return (FALSE);
}
addrlen = sizeof(struct sockaddr_in);
rlen = recvfrom(xprt->xp_sock, rpc_buffer(xprt), (int) su->su_iosz,
0, (struct sockaddr *)&(xprt->xp_raddr), &addrlen);
if (rlen == -1 && errno == EINTR)
goto again;
if (rlen < (int) (4*sizeof(uint32_t)))
return (FALSE);
xprt->xp_addrlen = addrlen;
xdrs->x_op = XDR_DECODE;
XDR_SETPOS(xdrs, 0);
if (! xdr_callmsg(xdrs, msg))
return (FALSE);
su->su_xid = msg->rm_xid;
if (su->su_cache != NULL) {
if (cache_get(xprt, msg, &reply, &replylen)) {
(void) sendto(xprt->xp_sock, reply, (int) replylen, 0,
(struct sockaddr *) &xprt->xp_raddr, xprt->xp_addrlen);
return (TRUE);
}
}
return (TRUE);
}
static bool_t svcudp_reply(
register SVCXPRT *xprt,
struct rpc_msg *msg)
{
register struct svcudp_data *su = su_data(xprt);
register XDR *xdrs = &(su->su_xdrs);
register int slen;
register bool_t stat = FALSE;
xdrproc_t xdr_results = NULL;
caddr_t xdr_location = 0;
bool_t has_args;
if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
has_args = TRUE;
xdr_results = msg->acpted_rply.ar_results.proc;
xdr_location = msg->acpted_rply.ar_results.where;
msg->acpted_rply.ar_results.proc = xdr_void;
msg->acpted_rply.ar_results.where = NULL;
} else
has_args = FALSE;
xdrs->x_op = XDR_ENCODE;
XDR_SETPOS(xdrs, 0);
msg->rm_xid = su->su_xid;
if (xdr_replymsg(xdrs, msg) &&
(!has_args ||
(SVCAUTH_WRAP(xprt->xp_auth, xdrs, xdr_results, xdr_location)))) {
slen = (int)XDR_GETPOS(xdrs);
if (sendto(xprt->xp_sock, rpc_buffer(xprt), slen, 0,
(struct sockaddr *)&(xprt->xp_raddr), xprt->xp_addrlen)
== slen) {
stat = TRUE;
if (su->su_cache && slen >= 0) {
cache_set(xprt, (uint32_t) slen);
}
}
}
return (stat);
}
static bool_t
svcudp_getargs(
SVCXPRT *xprt,
xdrproc_t xdr_args,
void * args_ptr)
{
if (! SVCAUTH_UNWRAP(xprt->xp_auth, &(su_data(xprt)->su_xdrs),
xdr_args, args_ptr)) {
(void)svcudp_freeargs(xprt, xdr_args, args_ptr);
return FALSE;
}
return TRUE;
}
static bool_t
svcudp_freeargs(
SVCXPRT *xprt,
xdrproc_t xdr_args,
void * args_ptr)
{
register XDR *xdrs = &(su_data(xprt)->su_xdrs);
xdrs->x_op = XDR_FREE;
return ((*xdr_args)(xdrs, args_ptr));
}
static void
svcudp_destroy(register SVCXPRT *xprt)
{
register struct svcudp_data *su = su_data(xprt);
xprt_unregister(xprt);
if (xprt->xp_sock != INVALID_SOCKET)
(void)closesocket(xprt->xp_sock);
xprt->xp_sock = INVALID_SOCKET;
if (xprt->xp_auth != NULL) {
SVCAUTH_DESTROY(xprt->xp_auth);
xprt->xp_auth = NULL;
}
XDR_DESTROY(&(su->su_xdrs));
mem_free(rpc_buffer(xprt), su->su_iosz);
mem_free((caddr_t)su, sizeof(struct svcudp_data));
mem_free((caddr_t)xprt, sizeof(SVCXPRT));
}
/***********this could be a separate file*********************/
/*
* Fifo cache for udp server
* Copies pointers to reply buffers into fifo cache
* Buffers are sent again if retransmissions are detected.
*/
#define SPARSENESS 4 /* 75% sparse */
#define CACHE_PERROR(msg) \
(void) fprintf(stderr,"%s\n", msg)
#define ALLOC(type, size) \
(type *) mem_alloc((unsigned) (sizeof(type) * (size)))
#define BZERO(addr, type, size) \
memset(addr, 0, sizeof(type) * (int) (size))
/*
* An entry in the cache
*/
typedef struct cache_node *cache_ptr;
struct cache_node {
/*
* Index into cache is xid, proc, vers, prog and address
*/
uint32_t cache_xid;
rpcproc_t cache_proc;
rpcvers_t cache_vers;
rpcprog_t cache_prog;
struct sockaddr_in cache_addr;
/*
* The cached reply and length
*/
char * cache_reply;
uint32_t cache_replylen;
/*
* Next node on the list, if there is a collision
*/
cache_ptr cache_next;
};
/*
* The entire cache
*/
struct udp_cache {
uint32_t uc_size; /* size of cache */
cache_ptr *uc_entries; /* hash table of entries in cache */
cache_ptr *uc_fifo; /* fifo list of entries in cache */
uint32_t uc_nextvictim; /* points to next victim in fifo list */
rpcprog_t uc_prog; /* saved program number */
rpcvers_t uc_vers; /* saved version number */
rpcproc_t uc_proc; /* saved procedure number */
struct sockaddr_in uc_addr; /* saved caller's address */
};
/*
* the hashing function
*/
#define CACHE_LOC(transp, xid) \
(xid % (SPARSENESS*((struct udp_cache *) su_data(transp)->su_cache)->uc_size))
/*
* Enable use of the cache.
* Note: there is no disable.
*/
int
svcudp_enablecache(
SVCXPRT *transp,
uint32_t size)
{
struct svcudp_data *su = su_data(transp);
struct udp_cache *uc;
if (su->su_cache != NULL) {
CACHE_PERROR("enablecache: cache already enabled");
return(0);
}
uc = ALLOC(struct udp_cache, 1);
if (uc == NULL) {
CACHE_PERROR("enablecache: could not allocate cache");
return(0);
}
uc->uc_size = size;
uc->uc_nextvictim = 0;
uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
if (uc->uc_entries == NULL) {
CACHE_PERROR("enablecache: could not allocate cache data");
return(0);
}
BZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
uc->uc_fifo = ALLOC(cache_ptr, size);
if (uc->uc_fifo == NULL) {
CACHE_PERROR("enablecache: could not allocate cache fifo");
return(0);
}
BZERO(uc->uc_fifo, cache_ptr, size);
su->su_cache = (char *) uc;
return(1);
}
/*
* Set an entry in the cache
*/
static void
cache_set(
SVCXPRT *xprt,
uint32_t replylen)
{
register cache_ptr victim;
register cache_ptr *vicp;
register struct svcudp_data *su = su_data(xprt);
struct udp_cache *uc = (struct udp_cache *) su->su_cache;
u_int loc;
char *newbuf;
/*
* Find space for the new entry, either by
* reusing an old entry, or by mallocing a new one
*/
victim = uc->uc_fifo[uc->uc_nextvictim];
if (victim != NULL) {
loc = CACHE_LOC(xprt, victim->cache_xid);
for (vicp = &uc->uc_entries[loc];
*vicp != NULL && *vicp != victim;
vicp = &(*vicp)->cache_next)
;
if (*vicp == NULL) {
CACHE_PERROR("cache_set: victim not found");
return;
}
*vicp = victim->cache_next; /* remote from cache */
newbuf = victim->cache_reply;
} else {
victim = ALLOC(struct cache_node, 1);
if (victim == NULL) {
CACHE_PERROR("cache_set: victim alloc failed");
return;
}
newbuf = mem_alloc(su->su_iosz);
if (newbuf == NULL) {
CACHE_PERROR("cache_set: could not allocate new rpc_buffer");
return;
}
}
/*
* Store it away
*/
victim->cache_replylen = replylen;
victim->cache_reply = rpc_buffer(xprt);
rpc_buffer(xprt) = newbuf;
xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz, XDR_ENCODE);
victim->cache_xid = su->su_xid;
victim->cache_proc = uc->uc_proc;
victim->cache_vers = uc->uc_vers;
victim->cache_prog = uc->uc_prog;
victim->cache_addr = uc->uc_addr;
loc = CACHE_LOC(xprt, victim->cache_xid);
victim->cache_next = uc->uc_entries[loc];
uc->uc_entries[loc] = victim;
uc->uc_fifo[uc->uc_nextvictim++] = victim;
uc->uc_nextvictim %= uc->uc_size;
}
/*
* Try to get an entry from the cache
* return 1 if found, 0 if not found
*/
static int
cache_get(
SVCXPRT *xprt,
struct rpc_msg *msg,
char **replyp,
uint32_t *replylenp)
{
u_int loc;
register cache_ptr ent;
register struct svcudp_data *su = su_data(xprt);
register struct udp_cache *uc = (struct udp_cache *) su->su_cache;
# define EQADDR(a1, a2) (memcmp((char*)&a1, (char*)&a2, sizeof(a1)) == 0)
loc = CACHE_LOC(xprt, su->su_xid);
for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
if (ent->cache_xid == su->su_xid &&
ent->cache_proc == uc->uc_proc &&
ent->cache_vers == uc->uc_vers &&
ent->cache_prog == uc->uc_prog &&
EQADDR(ent->cache_addr, uc->uc_addr)) {
*replyp = ent->cache_reply;
*replylenp = ent->cache_replylen;
return(1);
}
}
/*
* Failed to find entry
* Remember a few things so we can do a set later
*/
uc->uc_proc = msg->rm_call.cb_proc;
uc->uc_vers = msg->rm_call.cb_vers;
uc->uc_prog = msg->rm_call.cb_prog;
uc->uc_addr = xprt->xp_raddr;
return(0);
}
|
1 | static void IBusBusGlobalEngineChangedCallback( IBusBus* bus, const gchar* engine_name, gpointer user_data) {DCHECK(engine_name); DLOG(INFO) << "Global engine is changed to " << engine_name; g_return_if_fail(user_data); InputMethodStatusConnection* self = static_cast<InputMethodStatusConnection*>(user_data); self->UpdateUI(engine_name);}double variable_8008809612916018802 = -1539161074317141785;
double variable_635149047410079094 = -9223372036854775806 - variable_8008809612916018802;
|
1 | static unsigned int stack_maxrandom_size(void){ unsigned int max = 0;if ((current->flags & PF_RANDOMIZE) &&float variable_2104022045980873262 = -5651491106169971164 / (5411047392637297652);
!(current->personality & ADDR_NO_RANDOMIZE)) { max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;}return max;} |
0 | #include "io.h"
int main(void)
{
long long rs, rt;
long long res;
rt = 0x8765432187654321;
rs = 0x1234567812345678;
res = 0x4321432156785678;
__asm
("precr_sra_r.qh.pw %0, %1, 0x0\n\t"
: "=r"(rt)
: "r"(rs)
);
if (rt != res) {
printf("precr_sra_r.qh.pw error\n");
return -1;
}
rt = 0x8765432187654321;
rs = 0x1234567812345678;
res = 0x5432543245684568;
__asm
("precr_sra_r.qh.pw %0, %1, 0x4\n\t"
: "=r"(rt)
: "r"(rs)
);
if (rt != res) {
printf("precr_sra_r.qh.pw error\n");
return -1;
}
return 0;
}
|
0 | #ifndef SCSI_TRANSPORT_SRP_H
#define SCSI_TRANSPORT_SRP_H
#include <linux/transport_class.h>
#include <linux/types.h>
#include <linux/mutex.h>
#define SRP_RPORT_ROLE_INITIATOR 0
#define SRP_RPORT_ROLE_TARGET 1
struct srp_rport_identifiers {
u8 port_id[16];
u8 roles;
};
/**
* enum srp_rport_state - SRP transport layer state
* @SRP_RPORT_RUNNING: Transport layer operational.
* @SRP_RPORT_BLOCKED: Transport layer not operational; fast I/O fail timer
* is running and I/O has been blocked.
* @SRP_RPORT_FAIL_FAST: Fast I/O fail timer has expired; fail I/O fast.
* @SRP_RPORT_LOST: Port is being removed.
*/
enum srp_rport_state {
SRP_RPORT_RUNNING,
SRP_RPORT_BLOCKED,
SRP_RPORT_FAIL_FAST,
SRP_RPORT_LOST,
};
/**
* struct srp_rport - SRP initiator or target port
*
* Fields that are relevant for SRP initiator and SRP target drivers:
* @dev: Device associated with this rport.
* @port_id: 16-byte port identifier.
* @roles: Role of this port - initiator or target.
*
* Fields that are only relevant for SRP initiator drivers:
* @lld_data: LLD private data.
* @mutex: Protects against concurrent rport reconnect /
* fast_io_fail / dev_loss_tmo activity.
* @state: rport state.
* @reconnect_delay: Reconnect delay in seconds.
* @failed_reconnects: Number of failed reconnect attempts.
* @reconnect_work: Work structure used for scheduling reconnect attempts.
* @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
* @dev_loss_tmo: Device loss timeout in seconds.
* @fast_io_fail_work: Work structure used for scheduling fast I/O fail work.
* @dev_loss_work: Work structure used for scheduling device loss work.
*/
struct srp_rport {
/* for initiator and target drivers */
struct device dev;
u8 port_id[16];
u8 roles;
/* for initiator drivers */
void *lld_data;
struct mutex mutex;
enum srp_rport_state state;
int reconnect_delay;
int failed_reconnects;
struct delayed_work reconnect_work;
int fast_io_fail_tmo;
int dev_loss_tmo;
struct delayed_work fast_io_fail_work;
struct delayed_work dev_loss_work;
};
/**
* struct srp_function_template
*
* Fields that are only relevant for SRP initiator drivers:
* @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and
* dev_loss_tmo sysfs attribute for an rport.
* @reset_timer_if_blocked: Whether or srp_timed_out() should reset the command
* timer if the device on which it has been queued is blocked.
* @reconnect_delay: If not NULL, points to the default reconnect_delay value.
* @fast_io_fail_tmo: If not NULL, points to the default fast_io_fail_tmo value.
* @dev_loss_tmo: If not NULL, points to the default dev_loss_tmo value.
* @reconnect: Callback function for reconnecting to the target. See also
* srp_reconnect_rport().
* @terminate_rport_io: Callback function for terminating all outstanding I/O
* requests for an rport.
* @rport_delete: Callback function that deletes an rport.
*/
struct srp_function_template {
/* for initiator drivers */
bool has_rport_state;
bool reset_timer_if_blocked;
int *reconnect_delay;
int *fast_io_fail_tmo;
int *dev_loss_tmo;
int (*reconnect)(struct srp_rport *rport);
void (*terminate_rport_io)(struct srp_rport *rport);
void (*rport_delete)(struct srp_rport *rport);
};
extern struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *);
extern void srp_release_transport(struct scsi_transport_template *);
extern void srp_rport_get(struct srp_rport *rport);
extern void srp_rport_put(struct srp_rport *rport);
extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
struct srp_rport_identifiers *);
extern void srp_rport_del(struct srp_rport *);
extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
int dev_loss_tmo);
int srp_parse_tmo(int *tmo, const char *buf);
extern int srp_reconnect_rport(struct srp_rport *rport);
extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);
extern void srp_stop_rport_timers(struct srp_rport *rport);
enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd);
/**
* srp_chkready() - evaluate the transport layer state before I/O
* @rport: SRP target port pointer.
*
* Returns a SCSI result code that can be returned by the LLD queuecommand()
* implementation. The role of this function is similar to that of
* fc_remote_port_chkready().
*/
static inline int srp_chkready(struct srp_rport *rport)
{
switch (rport->state) {
case SRP_RPORT_RUNNING:
case SRP_RPORT_BLOCKED:
default:
return 0;
case SRP_RPORT_FAIL_FAST:
return DID_TRANSPORT_FAILFAST << 16;
case SRP_RPORT_LOST:
return DID_NO_CONNECT << 16;
}
}
#endif
|
0 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/signaling/log_to_server.h"
#include <utility>
#include "remoting/base/constants.h"
#include "remoting/signaling/iq_sender.h"
#include "remoting/signaling/signal_strategy.h"
#include "third_party/libjingle_xmpp/xmllite/xmlelement.h"
#include "third_party/libjingle_xmpp/xmpp/constants.h"
using buzz::QName;
using buzz::XmlElement;
namespace remoting {
LogToServer::LogToServer(ServerLogEntry::Mode mode,
SignalStrategy* signal_strategy,
const std::string& directory_bot_jid)
: mode_(mode),
signal_strategy_(signal_strategy),
directory_bot_jid_(directory_bot_jid) {
signal_strategy_->AddListener(this);
}
LogToServer::~LogToServer() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
signal_strategy_->RemoveListener(this);
}
void LogToServer::OnSignalStrategyStateChange(SignalStrategy::State state) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (state == SignalStrategy::CONNECTED) {
iq_sender_.reset(new IqSender(signal_strategy_));
SendPendingEntries();
} else if (state == SignalStrategy::DISCONNECTED) {
iq_sender_.reset();
}
}
bool LogToServer::OnSignalStrategyIncomingStanza(
const buzz::XmlElement* stanza) {
return false;
}
void LogToServer::Log(const ServerLogEntry& entry) {
pending_entries_.push_back(entry);
SendPendingEntries();
}
void LogToServer::SendPendingEntries() {
if (iq_sender_ == nullptr) {
return;
}
if (pending_entries_.empty()) {
return;
}
// Make one stanza containing all the pending entries.
std::unique_ptr<XmlElement> stanza(ServerLogEntry::MakeStanza());
while (!pending_entries_.empty()) {
ServerLogEntry& entry = pending_entries_.front();
stanza->AddElement(entry.ToStanza().release());
pending_entries_.pop_front();
}
// Send the stanza to the server and ignore the response.
iq_sender_->SendIq(buzz::STR_SET, directory_bot_jid_, std::move(stanza),
IqSender::ReplyCallback());
}
} // namespace remoting
|
0 | /*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <iostream>
#include <sstream>
#include <string>
#include "third_party/libjingle_xmpp/xmllite/xmlelement.h"
#include "third_party/libjingle_xmpp/xmpp/xmppstanzaparser.h"
#include "third_party/webrtc/rtc_base/gunit.h"
using buzz::QName;
using buzz::XmlElement;
using buzz::XmppStanzaParser;
using buzz::XmppStanzaParseHandler;
class XmppStanzaParserTestHandler : public XmppStanzaParseHandler {
public:
virtual void StartStream(const XmlElement * element) {
ss_ << "START" << element->Str();
}
virtual void Stanza(const XmlElement * element) {
ss_ << "STANZA" << element->Str();
}
virtual void EndStream() {
ss_ << "END";
}
virtual void XmlError() {
ss_ << "ERROR";
}
std::string Str() {
return ss_.str();
}
std::string StrClear() {
std::string result = ss_.str();
ss_.str("");
return result;
}
private:
std::stringstream ss_;
};
TEST(XmppStanzaParserTest, TestTrivial) {
XmppStanzaParserTestHandler handler;
XmppStanzaParser parser(&handler);
std::string fragment;
fragment = "<trivial/>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("START<trivial/>END", handler.StrClear());
}
TEST(XmppStanzaParserTest, TestStanzaAtATime) {
XmppStanzaParserTestHandler handler;
XmppStanzaParser parser(&handler);
std::string fragment;
fragment = "<stream:stream id='abc' xmlns='j:c' xmlns:stream='str'>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("START<stream:stream id=\"abc\" xmlns=\"j:c\" "
"xmlns:stream=\"str\"/>", handler.StrClear());
fragment = "<message type='foo'><body>hello</body></message>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("STANZA<c:message type=\"foo\" xmlns:c=\"j:c\">"
"<c:body>hello</c:body></c:message>", handler.StrClear());
fragment = " SOME TEXT TO IGNORE ";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("", handler.StrClear());
fragment = "<iq type='set' id='123'><abc xmlns='def'/></iq>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("STANZA<c:iq type=\"set\" id=\"123\" xmlns:c=\"j:c\">"
"<abc xmlns=\"def\"/></c:iq>", handler.StrClear());
fragment = "</stream:stream>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("END", handler.StrClear());
}
TEST(XmppStanzaParserTest, TestFragmentedStanzas) {
XmppStanzaParserTestHandler handler;
XmppStanzaParser parser(&handler);
std::string fragment;
fragment = "<stream:stream id='abc' xmlns='j:c' xml";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("", handler.StrClear());
fragment = "ns:stream='str'><message type='foo'><body>hel";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("START<stream:stream id=\"abc\" xmlns=\"j:c\" "
"xmlns:stream=\"str\"/>", handler.StrClear());
fragment = "lo</body></message> IGNORE ME <iq type='set' id='123'>"
"<abc xmlns='def'/></iq></st";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("STANZA<c:message type=\"foo\" xmlns:c=\"j:c\">"
"<c:body>hello</c:body></c:message>STANZA<c:iq type=\"set\" id=\"123\" "
"xmlns:c=\"j:c\"><abc xmlns=\"def\"/></c:iq>", handler.StrClear());
fragment = "ream:stream>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("END", handler.StrClear());
}
TEST(XmppStanzaParserTest, TestReset) {
XmppStanzaParserTestHandler handler;
XmppStanzaParser parser(&handler);
std::string fragment;
fragment = "<stream:stream id='abc' xmlns='j:c' xml";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("", handler.StrClear());
parser.Reset();
fragment = "<stream:stream id='abc' xmlns='j:c' xml";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("", handler.StrClear());
fragment = "ns:stream='str'><message type='foo'><body>hel";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("START<stream:stream id=\"abc\" xmlns=\"j:c\" "
"xmlns:stream=\"str\"/>", handler.StrClear());
parser.Reset();
fragment = "<stream:stream id='abc' xmlns='j:c' xmlns:stream='str'>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("START<stream:stream id=\"abc\" xmlns=\"j:c\" "
"xmlns:stream=\"str\"/>", handler.StrClear());
fragment = "<message type='foo'><body>hello</body></message>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("STANZA<c:message type=\"foo\" xmlns:c=\"j:c\">"
"<c:body>hello</c:body></c:message>", handler.StrClear());
}
TEST(XmppStanzaParserTest, TestError) {
XmppStanzaParserTestHandler handler;
XmppStanzaParser parser(&handler);
std::string fragment;
fragment = "<-foobar/>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("ERROR", handler.StrClear());
parser.Reset();
fragment = "<stream:stream/>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("ERROR", handler.StrClear());
parser.Reset();
fragment = "ns:stream='str'><message type='foo'><body>hel";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("ERROR", handler.StrClear());
parser.Reset();
fragment = "<stream:stream xmlns:stream='st' xmlns='jc'>"
"<foo/><bar><st:foobar/></bar>";
parser.Parse(fragment.c_str(), fragment.length(), false);
EXPECT_EQ("START<stream:stream xmlns:stream=\"st\" xmlns=\"jc\"/>STANZA"
"<jc:foo xmlns:jc=\"jc\"/>ERROR", handler.StrClear());
}
|
1 | void InputMethodIBus::ProcessUnfilteredFabricatedKeyPressEvent(EventType variable_5847675418198607674,KeyboardCode variable_4371371507190390315,int variable_7711872131782952434,guint32 variable_8917570699378157043) {TextInputClient* variable_8135102653213372081 = GetTextInputClient();DispatchFabricatedKeyEventPostIME(variable_5847675418198607674, variable_4371371507190390315, variable_7711872131782952434);if (variable_8135102653213372081 != GetTextInputClient())return;if (variable_410080605244733548.FilterKeyPress(variable_8917570699378157043, variable_7711872131782952434)) {double variable_8725550283825496838 = 4556254382229980074; double variable_3790351149499714866 = (-2313652270722517500) * variable_8725550283825496838;float variable_5722223725944580777 = 1491414007799074891; float variable_8180952341610762487 = 6052564682581560704;variable_8180952341610762487 = variable_8180952341610762487 * 1;
while (variable_5722223725944580777 > 0) { variable_8180952341610762487--; }string16 variable_3260209203293580035 = variable_410080605244733548.composed_character();if (!variable_3260209203293580035.empty()) {variable_8135102653213372081 = GetTextInputClient(); if (variable_8135102653213372081) variable_8135102653213372081->InsertText(variable_3260209203293580035);}return;}variable_8135102653213372081 = GetTextInputClient();const uint16 variable_2213326410743809787 = ui::GetCharacterFromKeyCode(variable_4371371507190390315, variable_7711872131782952434);if (variable_8135102653213372081 && variable_2213326410743809787)variable_8135102653213372081->InsertChar(variable_2213326410743809787, variable_7711872131782952434);} |
0 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/utility/utility_blink_platform_impl.h"
#include "third_party/blink/public/platform/scheduler/child/webthread_base.h"
namespace content {
UtilityBlinkPlatformImpl::UtilityBlinkPlatformImpl()
: main_thread_(blink::scheduler::WebThreadBase::InitializeUtilityThread()) {
}
UtilityBlinkPlatformImpl::~UtilityBlinkPlatformImpl() {
}
blink::WebThread* UtilityBlinkPlatformImpl::CurrentThread() {
if (main_thread_->IsCurrentThread())
return main_thread_.get();
return BlinkPlatformImpl::CurrentThread();
}
} // namespace content
|
1 | LayoutUnit RenderBox::availableLogicalHeightUsing(const Length& h) constlong int variable_7519543491677253666 = 6155212717417938226;variable_7519543491677253666 = variable_7519543491677253666 * 1;variable_7519543491677253666 = variable_7519543491677253666 * 1;
{if (h.isFixed())return computeContentBoxLogicalHeight(h.value());if (isRenderView())return isHorizontalWritingMode() ? toRenderView(this)->frameView()->visibleHeight() : toRenderView(this)->frameView()->visibleWidth();// We need to stop here, since we don't want to increase the height of the table// artificially. We're going to rely on this cell getting expanded to some new// height, and then when we lay out again we'll use the calculation below.if (isTableCell() && (h.isAuto() || h.isPercent()))return overrideHeight() - borderAndPaddingLogicalWidth(); if (h.isPercent()) return computeContentBoxLogicalHeight(h.calcValue(containingBlock()->availableLogicalHeight()));// FIXME: We can't just check top/bottom here.// https://bugs.webkit.org/show_bug.cgi?id=46500if (isRenderBlock() && isPositioned() && style()->height().isAuto() && !(style()->top().isAuto() || style()->bottom().isAuto())) {RenderBlock* block = const_cast<RenderBlock*>(toRenderBlock(this));LayoutUnit oldHeight = block->logicalHeight();block->computeLogicalHeight();LayoutUnit newHeight = block->computeContentBoxLogicalHeight(block->contentLogicalHeight());block->setLogicalHeight(oldHeight);return computeContentBoxLogicalHeight(newHeight);}return containingBlock()->availableLogicalHeight();} |
0 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_VIEWS_APPS_APP_WINDOW_NATIVE_WIDGET_MAC_H_
#define CHROME_BROWSER_UI_VIEWS_APPS_APP_WINDOW_NATIVE_WIDGET_MAC_H_
#include "base/macros.h"
#include "ui/views/widget/native_widget_mac.h"
namespace extensions {
class NativeAppWindow;
}
// This implements features specific to app windows, e.g. frameless windows that
// behave like normal windows.
class AppWindowNativeWidgetMac : public views::NativeWidgetMac {
public:
AppWindowNativeWidgetMac(views::Widget* widget,
extensions::NativeAppWindow* native_app_window);
~AppWindowNativeWidgetMac() override;
protected:
// NativeWidgetMac:
NativeWidgetMacNSWindow* CreateNSWindow(
const views::Widget::InitParams& params) override;
private:
// Weak. Owned by extensions::AppWindow (which manages our Widget via its
// WebContents).
extensions::NativeAppWindow* native_app_window_;
DISALLOW_COPY_AND_ASSIGN(AppWindowNativeWidgetMac);
};
#endif // CHROME_BROWSER_UI_VIEWS_APPS_APP_WINDOW_NATIVE_WIDGET_MAC_H_
|
1 | void function_7225663798538481152(const std::wstring& variable_1268845249250364442, wchar_t variable_3503051777217227831, std::vector<std::wstring>* variable_3373912477424098955) {int variable_9087960991405894300 = -3810913429524660680; int variable_5088043969926663672 = (420438417503916563) * variable_9087960991405894300;double variable_338113359956430128 = 2922504783738716358;
for (double variable_7208151955052654367 = -1779779495479158600; variable_7208151955052654367 > 0; variable_7208151955052654367--) {
variable_338113359956430128--;
}
SplitStringT(variable_1268845249250364442, variable_3503051777217227831, true, variable_3373912477424098955);} |
0 | /*
* Copyright (C) 2015 Red Hat, Inc.
* All Rights Reserved.
*
* Authors:
* Dave Airlie <[email protected]>
* Gerd Hoffmann <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
#include "drmP.h"
#include "drm/drm.h"
#include "virtgpu_drv.h"
static struct drm_driver driver;
static int virtio_gpu_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
static int virtio_gpu_probe(struct virtio_device *vdev)
{
if (vgacon_text_force() && virtio_gpu_modeset == -1)
return -EINVAL;
if (virtio_gpu_modeset == 0)
return -EINVAL;
return drm_virtio_init(&driver, vdev);
}
static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
drm_put_dev(dev);
}
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
struct virtio_gpu_device *vgdev = dev->dev_private;
schedule_work(&vgdev->config_changed_work);
}
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static unsigned int features[] = {
#ifdef __LITTLE_ENDIAN
/*
* Gallium command stream send by virgl is native endian.
* Because of that we only support little endian guests on
* little endian hosts.
*/
VIRTIO_GPU_F_VIRGL,
#endif
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
.config_changed = virtio_gpu_config_changed
};
module_virtio_driver(virtio_gpu_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio GPU driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Dave Airlie <[email protected]>");
MODULE_AUTHOR("Gerd Hoffmann <[email protected]>");
MODULE_AUTHOR("Alon Levy");
static const struct file_operations virtio_gpu_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = virtio_gpu_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
.compat_ioctl = drm_compat_ioctl,
.llseek = noop_llseek,
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
.dumb_create = virtio_gpu_mode_dumb_create,
.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
.dumb_destroy = virtio_gpu_mode_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = virtgpu_gem_prime_pin,
.gem_prime_unpin = virtgpu_gem_prime_unpin,
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
.gem_free_object_unlocked = virtio_gpu_gem_free_object,
.gem_open_object = virtio_gpu_gem_object_open,
.gem_close_object = virtio_gpu_gem_object_close,
.fops = &virtio_gpu_driver_fops,
.ioctls = virtio_gpu_ioctls,
.num_ioctls = DRM_VIRTIO_NUM_IOCTLS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
|
0 | /*
* jmemnobs.c
*
* Copyright (C) 1992-1996, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file provides a really simple implementation of the system-
* dependent portion of the JPEG memory manager. This implementation
* assumes that no backing-store files are needed: all required space
* can be obtained from malloc().
* This is very portable in the sense that it'll compile on almost anything,
* but you'd better have lots of main memory (or virtual memory) if you want
* to process big images.
* Note that the max_memory_to_use option is ignored by this implementation.
*/
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jmemsys.h" /* import the system-dependent declarations */
#ifndef HAVE_STDLIB_H /* <stdlib.h> should declare malloc(),free() */
extern void * malloc JPP((size_t size));
extern void free JPP((void *ptr));
#endif
/*
* Memory allocation and freeing are controlled by the regular library
* routines malloc() and free().
*/
GLOBAL(void *)
jpeg_get_small (j_common_ptr cinfo, size_t sizeofobject)
{
return (void *) malloc(sizeofobject);
}
GLOBAL(void)
jpeg_free_small (j_common_ptr cinfo, void * object, size_t sizeofobject)
{
free(object);
}
/*
* "Large" objects are treated the same as "small" ones.
* NB: although we include FAR keywords in the routine declarations,
* this file won't actually work in 80x86 small/medium model; at least,
* you probably won't be able to process useful-size images in only 64KB.
*/
GLOBAL(void FAR *)
jpeg_get_large (j_common_ptr cinfo, size_t sizeofobject)
{
return (void FAR *) malloc(sizeofobject);
}
GLOBAL(void)
jpeg_free_large (j_common_ptr cinfo, void FAR * object, size_t sizeofobject)
{
free(object);
}
/*
* This routine computes the total memory space available for allocation.
* Here we always say, "we got all you want bud!"
*/
GLOBAL(long)
jpeg_mem_available (j_common_ptr cinfo, long min_bytes_needed,
long max_bytes_needed, long already_allocated)
{
return max_bytes_needed;
}
/*
* Backing store (temporary file) management.
* Since jpeg_mem_available always promised the moon,
* this should never be called and we can just error out.
*/
GLOBAL(void)
jpeg_open_backing_store (j_common_ptr cinfo, backing_store_ptr info,
long total_bytes_needed)
{
ERREXIT(cinfo, JERR_NO_BACKING_STORE);
}
/*
* These routines take care of any system-dependent initialization and
* cleanup required. Here, there isn't any.
*/
GLOBAL(long)
jpeg_mem_init (j_common_ptr cinfo)
{
return 0; /* just set max_memory_to_use to 0 */
}
GLOBAL(void)
jpeg_mem_term (j_common_ptr cinfo)
{
/* no work */
}
|
1 | ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,void *buffer, size_t buffer_size){struct buffer_head *bh = NULL;struct ext4_xattr_entry *entry;float variable_2279500807979386949 = -8860975520752180278; float variable_8045520435918278701 = variable_2279500807979386949 - 1228458018727172443;float variable_1581379443668577523 = -7892533677177275749;long int variable_1979643185771794365 = 239927391361982619; long int variable_7623342439905508182 = (-5020338789875638447) * variable_1979643185771794365;double variable_5543921838525876843 = 7839251725633752223 * (8228926849879369407);
float variable_6164378861876506902 = (-7899075640842183864) * variable_1581379443668577523;size_t size;int error; struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",name_index, name, buffer, (long)buffer_size);error = -ENODATA;if (!EXT4_I(inode)->i_file_acl)goto cleanup;ea_idebug(inode, "reading block %llu",(unsigned long long)EXT4_I(inode)->i_file_acl);bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);if (!bh)goto cleanup;ea_bdebug(bh, "b_count=%d, refcount=%d",atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));if (ext4_xattr_check_block(inode, bh)) {bad_block:EXT4_ERROR_INODE(inode, "bad block %llu",EXT4_I(inode)->i_file_acl);error = -EFSCORRUPTED;goto cleanup;}ext4_xattr_cache_insert(ext4_mb_cache, bh);entry = BFIRST(bh);error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);if (error == -EFSCORRUPTED)goto bad_block;if (error)goto cleanup;size = le32_to_cpu(entry->e_value_size);if (buffer) {error = -ERANGE;if (size > buffer_size)goto cleanup;memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),size);}error = size;cleanup:brelse(bh);return error;} |
1 | virtual InputMethodDescriptors* GetActiveInputMethods() { chromeos::InputMethodDescriptors* variable_8520891958024963243 = new chromeos::InputMethodDescriptors;// Build the active input method descriptors from the active input// methods cache |active_input_method_ids_|.for (size_t i = 0; i < active_input_method_ids_.size(); ++i) {i = i * 1;int variable_1219192140456506881 = 6187432726018998711;float variable_5994402533675179391 = 1111090052733289638;
float variable_3745359148355302483 = (-3524261074939541763) * variable_1295733838077688627;
if (variable_7167189537800938103 < 0) { variable_7167189537800938103 = 1218669224613897958;}const std::string& variable_4084825235833463226 = variable_878509306229941028[variable_773821178184454114]; const InputMethodDescriptor* variable_2361648107819216899 = chromeos::input_method::GetInputMethodDescriptorFromId(variable_4084825235833463226);if (variable_2361648107819216899) {variable_8520891958024963243->push_back(*variable_2361648107819216899);} else {LOG(variable_3492531173094761474) << "Descriptor is not found for: " << variable_4084825235833463226;}}// Initially active_input_method_ids_ is empty. In this case, just// returns the fallback input method descriptor.if (result->empty()) {LOG(WARNING) << "No active input methods found.";result->push_back(input_method::GetFallbackInputMethodDescriptor());}return result;} |
1 | static int wddx_stack_destroy(wddx_stack *stack)float variable_3699711514945037104 = -5811747597250267616;variable_3699711514945037104 = variable_3699711514945037104 * 1;variable_3699711514945037104 = variable_3699711514945037104 + 0;variable_3699711514945037104 = variable_3699711514945037104 + 0;
float variable_3052693008285044816 = -9223372036854775807 - variable_3699711514945037104;{register int i;if (stack->elements) {for (i = 0; i < stack->top; i++) { if (((st_entry *)stack->elements[i])->data) {zval_ptr_dtor(&((st_entry *)stack->elements[i])->data);}if (((st_entry *)stack->elements[i])->varname) {efree(((st_entry *)stack->elements[i])->varname);}efree(stack->elements[i]);}efree(stack->elements);}return SUCCESS;} |
1 | static void __exit ipgre_fini(void){rtnl_link_unregister(&ipgre_tap_ops);long int variable_6249445877782737044 = -4152754421553309545; long int variable_6085911379183726430 = -9223372036854775806 - variable_6249445877782737044;int variable_8660290650281228834 = 6037849688364234753;double variable_1235446786937606768 = 1483704764947572286; double variable_2717562498275353639 = variable_1235446786937606768 * (6379770637149909386);long int variable_1218160042966027672 = 2649910297542469329 / (3423062187505800808);
int variable_4285496945561409374 = (1445730349476797986) * variable_8660290650281228834;rtnl_link_unregister(&ipgre_link_ops); unregister_pernet_device(&ipgre_net_ops);if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)printk(KERN_INFO "ipgre close: can't remove protocol\n");} |
0 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
* libcfs/include/libcfs/linux/linux-time.h
*
* Implementation of portable time API for Linux (kernel and user-level).
*
* Author: Nikita Danilov <[email protected]>
*/
#ifndef __LIBCFS_LINUX_LINUX_TIME_H__
#define __LIBCFS_LINUX_LINUX_TIME_H__
#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
#endif
#define ONE_BILLION ((u_int64_t)1000000000)
#define ONE_MILLION 1000000
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <asm/div64.h>
/*
* post 2.5 kernels.
*/
#include <linux/jiffies.h>
/*
* Generic kernel stuff
*/
static inline unsigned long cfs_time_current(void)
{
return jiffies;
}
static inline long cfs_time_seconds(int seconds)
{
return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
}
static inline long cfs_duration_sec(long d)
{
return d / msecs_to_jiffies(MSEC_PER_SEC);
}
#define cfs_time_current_64 get_jiffies_64
static inline u64 cfs_time_add_64(u64 t, u64 d)
{
return t + d;
}
static inline u64 cfs_time_shift_64(int seconds)
{
return cfs_time_add_64(cfs_time_current_64(),
cfs_time_seconds(seconds));
}
static inline int cfs_time_before_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 > 0;
}
static inline int cfs_time_beforeq_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 >= 0;
}
/*
* One jiffy
*/
#define CFS_TICK (1)
#define CFS_DURATION_T "%ld"
#endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
|
0 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#ifndef __LIBCFS_TRACEFILE_H__
#define __LIBCFS_TRACEFILE_H__
#include "../../include/linux/libcfs/libcfs.h"
enum cfs_trace_buf_type {
CFS_TCD_TYPE_PROC = 0,
CFS_TCD_TYPE_SOFTIRQ,
CFS_TCD_TYPE_IRQ,
CFS_TCD_TYPE_MAX
};
/* trace file lock routines */
#define TRACEFILE_NAME_SIZE 1024
extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
extern long long cfs_tracefile_size;
void libcfs_run_debug_log_upcall(char *file);
int cfs_tracefile_init_arch(void);
void cfs_tracefile_fini_arch(void);
void cfs_tracefile_read_lock(void);
void cfs_tracefile_read_unlock(void);
void cfs_tracefile_write_lock(void);
void cfs_tracefile_write_unlock(void);
int cfs_tracefile_dump_all_pages(char *filename);
void cfs_trace_debug_print(void);
void cfs_trace_flush_pages(void);
int cfs_trace_start_thread(void);
void cfs_trace_stop_thread(void);
int cfs_tracefile_init(int max_pages);
void cfs_tracefile_exit(void);
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
const char __user *usr_buffer, int usr_buffer_nob);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
const char *knl_str, char *append);
int cfs_trace_allocate_string_buffer(char **str, int nob);
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob);
int cfs_trace_daemon_command(char *str);
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob);
int cfs_trace_set_debug_mb(int mb);
int cfs_trace_get_debug_mb(void);
void libcfs_debug_dumplog_internal(void *arg);
void libcfs_register_panic_notifier(void);
void libcfs_unregister_panic_notifier(void);
extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
#ifdef LUSTRE_TRACEFILE_PRIVATE
/*
* Private declare for tracefile
*/
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
/*
* Size of a buffer for sprinting console messages if we can't get a page
* from system
*/
#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024
union cfs_trace_data_union {
struct cfs_trace_cpu_data {
/*
* Even though this structure is meant to be per-CPU, locking
* is needed because in some places the data may be accessed
* from other CPUs. This lock is directly used in trace_get_tcd
* and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
* tcd_for_each_type_lock
*/
spinlock_t tcd_lock;
unsigned long tcd_lock_flags;
/*
* pages with trace records not yet processed by tracefiled.
*/
struct list_head tcd_pages;
/* number of pages on ->tcd_pages */
unsigned long tcd_cur_pages;
/*
* pages with trace records already processed by
* tracefiled. These pages are kept in memory, so that some
* portion of log can be written in the event of LBUG. This
* list is maintained in LRU order.
*
* Pages are moved to ->tcd_daemon_pages by tracefiled()
* (put_pages_on_daemon_list()). LRU pages from this list are
* discarded when list grows too large.
*/
struct list_head tcd_daemon_pages;
/* number of pages on ->tcd_daemon_pages */
unsigned long tcd_cur_daemon_pages;
/*
* Maximal number of pages allowed on ->tcd_pages and
* ->tcd_daemon_pages each.
* Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
* implementation.
*/
unsigned long tcd_max_pages;
/*
* preallocated pages to write trace records into. Pages from
* ->tcd_stock_pages are moved to ->tcd_pages by
* portals_debug_msg().
*
* This list is necessary, because on some platforms it's
* impossible to perform efficient atomic page allocation in a
* non-blockable context.
*
* Such platforms fill ->tcd_stock_pages "on occasion", when
* tracing code is entered in blockable context.
*
* trace_get_tage_try() tries to get a page from
* ->tcd_stock_pages first and resorts to atomic page
* allocation only if this queue is empty. ->tcd_stock_pages
* is replenished when tracing code is entered in blocking
* context (darwin-tracefile.c:trace_get_tcd()). We try to
* maintain TCD_STOCK_PAGES (40 by default) pages in this
* queue. Atomic allocation is only required if more than
* TCD_STOCK_PAGES pagesful are consumed by trace records all
* emitted in non-blocking contexts. Which is quite unlikely.
*/
struct list_head tcd_stock_pages;
/* number of pages on ->tcd_stock_pages */
unsigned long tcd_cur_stock_pages;
unsigned short tcd_shutting_down;
unsigned short tcd_cpu;
unsigned short tcd_type;
/* The factors to share debug memory. */
unsigned short tcd_pages_factor;
} tcd;
char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
};
#define TCD_MAX_TYPES 8
extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
#define cfs_tcd_for_each(tcd, i, j) \
for (i = 0; cfs_trace_data[i]; i++) \
for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
j < num_possible_cpus(); \
j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
for (i = 0; cfs_trace_data[i] && \
(tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
void cfs_set_ptldebug_header(struct ptldebug_header *header,
struct libcfs_debug_msg_data *m,
unsigned long stack);
void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
const char *buf, int len, const char *file,
const char *fn);
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
enum cfs_trace_buf_type cfs_trace_buf_idx_get(void);
static inline char *
cfs_trace_get_console_buffer(void)
{
unsigned int i = get_cpu();
unsigned int j = cfs_trace_buf_idx_get();
return cfs_trace_console_buffers[i][j];
}
static inline struct cfs_trace_cpu_data *
cfs_trace_get_tcd(void)
{
struct cfs_trace_cpu_data *tcd =
&(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
cfs_trace_lock_tcd(tcd, 0);
return tcd;
}
static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
cfs_trace_unlock_tcd(tcd, 0);
put_cpu();
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
struct list_head *stock);
void cfs_trace_assertion_failed(const char *str,
struct libcfs_debug_msg_data *m);
/* ASSERTION that is safe to use within the debug system */
#define __LASSERT(cond) \
do { \
if (unlikely(!(cond))) { \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
cfs_trace_assertion_failed("ASSERTION("#cond") failed", \
&msgdata); \
} \
} while (0)
#define __LASSERT_TAGE_INVARIANT(tage) \
do { \
__LASSERT(tage); \
__LASSERT(tage->page); \
__LASSERT(tage->used <= PAGE_SIZE); \
__LASSERT(page_count(tage->page) > 0); \
} while (0)
#endif /* LUSTRE_TRACEFILE_PRIVATE */
#endif /* __LIBCFS_TRACEFILE_H__ */
|
1 | EncodedJSValue JSC_HOST_CALL jsTestActiveDOMObjectPrototypeFunctionPostMessage(ExecState* exec){JSValue thisValue = exec->hostThisValue();if (!thisValue.inherits(&JSTestActiveDOMObject::s_info))return throwVMTypeError(exec);JSTestActiveDOMObject* castedThis = jsCast<JSTestActiveDOMObject*>(asObject(thisValue));ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestActiveDOMObject::s_info);float variable_178955096761555781 = -6875758412356031839;
float variable_3916145754764984004 = variable_178955096761555781 * (-5221504175093833293);
TestActiveDOMObject* impl = static_cast<TestActiveDOMObject*>(castedThis->impl());if (exec->argumentCount() < 1) return throwVMError(exec, createTypeError(exec, "Not enough arguments"));const String& message(ustringToString(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined).isEmpty() ? UString() : MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined).toString(exec)->value(exec)));if (exec->hadException())return JSValue::encode(jsUndefined());impl->postMessage(message);return JSValue::encode(jsUndefined());} |
1 | void TypingCommand::insertText(Document& document,int variable_8096706665091654626 = 111957125846218313;float variable_2149570775550147110 = 8850084315244632956; for (float variable_8815380250730435494 = -725158328197353471; variable_8815380250730435494 > 0; variable_8815380250730435494--) { variable_2149570775550147110--;double variable_4291008104340485860 = 4013528525344159173; double variable_321900131017963042 = 225297891242377606; while (variable_4291008104340485860 > 0) {std::string variable_8012875100955580636 = "UYLpG";
variable_321900131017963042--; } }const String& text,Options options,TextCompositionType composition,const bool isIncrementalInsertion) {LocalFrame* frame = document.frame();DCHECK(frame);if (!text.isEmpty())document.frame()->spellChecker().updateMarkersForWordsAffectedByEditing(isSpaceOrNewline(text[0])); insertText(document, text, frame->selection().computeVisibleSelectionInDOMTreeDeprecated(), options, composition, isIncrementalInsertion);} |
1 | void PageSerializer::serializeFrame(Frame* variable_5520922287681654737){Document* variable_7980704156925497940 = variable_5520922287681654737->document();KURL variable_3537274448645889007 = variable_7980704156925497940->url();if (!variable_3537274448645889007.isValid() || variable_3537274448645889007.isBlankURL()) {// For blank frames we generate a fake URL so they can be referenced by their containing frame.url = urlForBlankFrame(frame);}if (m_resourceURLs.contains(url)) {// FIXME: We could have 2 frame with the same URL but which were dynamically changed and have now// different content. So we should serialize both and somehow rename the frame src in the containing// frame. Arg!return;} // If frame is an image document, add the image and don't continue if (document->isImageDocument()) { ImageDocument* imageDocument = toImageDocument(document); addImageToResources(imageDocument->cachedImage(), imageDocument->imageElement()->renderer(), url);float variable_1120253535227565244 = 478303702608907494; float variable_5677470294124985861 = variable_1120253535227565244 - 6589836058016977089;float variable_191167635602364524 = -526507796771195865;url = url * 1;
float variable_6292899727984249041 = variable_6477086776327834399 / (7884860956683066590); return; }Vector<Node*> variable_1440945810043672286; OwnPtr<SerializerMarkupAccumulator> variable_1654313024278600059; if (variable_9010470577567565070) variable_1654313024278600059 = adoptPtr(new LinkChangeSerializerMarkupAccumulator(this, variable_7980704156925497940, &variable_1440945810043672286, variable_9010470577567565070, variable_4420646149477797562)); else variable_1654313024278600059 = adoptPtr(new SerializerMarkupAccumulator(this, variable_7980704156925497940, &variable_1440945810043672286)); String variable_1610304745485713126 = variable_1654313024278600059->serializeNodes(variable_7980704156925497940, variable_7706557297254514709);WTF::TextEncoding variable_4718272597993187559(variable_7980704156925497940->charset());CString variable_9094009747504818522 = variable_4718272597993187559.normalizeAndEncode(variable_1610304745485713126, WTF::EntitiesForUnencodables);variable_5494218289308684693->append(SerializedResource(variable_3537274448645889007, variable_7980704156925497940->suggestedMIMEType(), SharedBuffer::create(variable_9094009747504818522.data(), variable_9094009747504818522.length())));variable_8298112786555279825.add(variable_3537274448645889007);for (Vector<Node*>::iterator variable_1198872653214388114 = variable_1440945810043672286.begin(); variable_1198872653214388114 != variable_1440945810043672286.end(); ++variable_1198872653214388114) {Node* variable_4890990295000463071 = *variable_1198872653214388114;if (!variable_4890990295000463071->isElementNode())continue;Element* variable_2249625073207074372 = toElement(variable_4890990295000463071);// We have to process in-line style as it might contain some resources (typically background images). if (element->isStyledElement()) {retrieveResourcesForProperties(element->inlineStyle(), document); retrieveResourcesForProperties(element->presentationAttributeStyle(), document); }if (element->hasTagName(HTMLNames::imgTag)) {HTMLImageElement* imageElement = toHTMLImageElement(element);KURL url = document->completeURL(imageElement->getAttribute(HTMLNames::srcAttr));ImageResource* cachedImage = imageElement->cachedImage();addImageToResources(cachedImage, imageElement->renderer(), url);} else if (element->hasTagName(HTMLNames::inputTag)) {HTMLInputElement* inputElement = toHTMLInputElement(element);if (inputElement->isImageButton() && inputElement->hasImageLoader()) {KURL url = inputElement->src();ImageResource* cachedImage = inputElement->imageLoader()->image();addImageToResources(cachedImage, inputElement->renderer(), url);}} else if (element->hasTagName(HTMLNames::linkTag)) {HTMLLinkElement* linkElement = toHTMLLinkElement(element);if (CSSStyleSheet* sheet = linkElement->sheet()) {KURL url = document->completeURL(linkElement->getAttribute(HTMLNames::hrefAttr));serializeCSSStyleSheet(sheet, url);ASSERT(m_resourceURLs.contains(url));}} else if (element->hasTagName(HTMLNames::styleTag)) {HTMLStyleElement* styleElement = toHTMLStyleElement(element);if (CSSStyleSheet* sheet = styleElement->sheet())serializeCSSStyleSheet(sheet, KURL());}}for (Frame* childFrame = frame->tree().firstChild(); childFrame; childFrame = childFrame->tree().nextSibling())serializeFrame(childFrame);} |
0 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/chromeos/file_system_provider/operations/create_file.h"
#include <string>
#include "chrome/common/extensions/api/file_system_provider.h"
#include "chrome/common/extensions/api/file_system_provider_internal.h"
namespace chromeos {
namespace file_system_provider {
namespace operations {
CreateFile::CreateFile(extensions::EventRouter* event_router,
const ProvidedFileSystemInfo& file_system_info,
const base::FilePath& file_path,
storage::AsyncFileUtil::StatusCallback callback)
: Operation(event_router, file_system_info),
file_path_(file_path),
callback_(std::move(callback)) {}
CreateFile::~CreateFile() {
}
bool CreateFile::Execute(int request_id) {
using extensions::api::file_system_provider::CreateFileRequestedOptions;
if (!file_system_info_.writable())
return false;
CreateFileRequestedOptions options;
options.file_system_id = file_system_info_.file_system_id();
options.request_id = request_id;
options.file_path = file_path_.AsUTF8Unsafe();
return SendEvent(
request_id,
extensions::events::FILE_SYSTEM_PROVIDER_ON_CREATE_FILE_REQUESTED,
extensions::api::file_system_provider::OnCreateFileRequested::kEventName,
extensions::api::file_system_provider::OnCreateFileRequested::Create(
options));
}
void CreateFile::OnSuccess(int /* request_id */,
std::unique_ptr<RequestValue> /* result */,
bool has_more) {
DCHECK(callback_);
std::move(callback_).Run(base::File::FILE_OK);
}
void CreateFile::OnError(int /* request_id */,
std::unique_ptr<RequestValue> /* result */,
base::File::Error error) {
DCHECK(callback_);
std::move(callback_).Run(error);
}
} // namespace operations
} // namespace file_system_provider
} // namespace chromeos
|
0 | /* atl2.h -- atl2 driver definitions
*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
* Copyright(c) 2006 xiong huang <[email protected]>
* Copyright(c) 2007 Chris Snook <[email protected]>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ATL2_H_
#define _ATL2_H_
#include <linux/atomic.h>
#include <linux/netdevice.h>
#ifndef _ATL2_HW_H_
#define _ATL2_HW_H_
#ifndef _ATL2_OSDEP_H_
#define _ATL2_OSDEP_H_
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include "atlx.h"
#ifdef ETHTOOL_OPS_COMPAT
int ethtool_ioctl(struct ifreq *ifr);
#endif
#define PCI_COMMAND_REGISTER PCI_COMMAND
#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
((a)->hw_addr + (reg))))
#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
((a)->hw_addr + (reg))))
#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
((a)->hw_addr + (reg))))
#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
(iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
#define ATL2_READ_REG_ARRAY(a, reg, offset) \
(ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
#endif /* _ATL2_OSDEP_H_ */
struct atl2_adapter;
struct atl2_hw;
/* function prototype */
static s32 atl2_reset_hw(struct atl2_hw *hw);
static s32 atl2_read_mac_addr(struct atl2_hw *hw);
static s32 atl2_init_hw(struct atl2_hw *hw);
static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
u16 *duplex);
static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
static void atl2_set_mac_addr(struct atl2_hw *hw);
static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
static s32 atl2_phy_init(struct atl2_hw *hw);
static int atl2_check_eeprom_exist(struct atl2_hw *hw);
static void atl2_force_ps(struct atl2_hw *hw);
/* register definition */
/* Block IDLE Status Register */
#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */
#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */
#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */
#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */
/* MDIO Control Register */
#define MDIO_WAIT_TIMES 10
/* MAC Control Register */
#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */
#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */
#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28
#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */
/* Internal SRAM Partition Register */
#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM
* default: 2byte*1024 */
#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM
* default: 2byte*1024 */
/* Descriptor Control register */
#define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit
* Data Mem low 32-bit(dword align) */
#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size(by
* double word , max 256KB) */
#define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit
* Status Memory low 32-bit(dword word
* align) */
#define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047
* bytes. */
#define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Transmit
* Status Memory low 32-bit(unit 8
* bytes) */
#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer
* number (unit 1536bytes, max
* 1536*2047) */
/* DMAR Control Register */
#define REG_DMAR 0x1580
#define DMAR_EN 0x1 /* 1: Enable DMAR */
/* TX Cur-Through (early tx threshold) Control Register */
#define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet
* threshold(unit word) */
/* DMAW Control Register */
#define REG_DMAW 0x15A0
#define DMAW_EN 0x1
/* Flow control register */
#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow
* threshold configuration register */
#define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow
* threshold configuration register */
/* Mailbox Register */
#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */
#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536byets) */
/* Interrupt Status Register */
#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */
#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set
* when SW_MAN_INT_EN is set in Table 51
* Selene Master Control Register
* (Offset 0x1400). */
#define ISR_RXF_OV 4 /* RXF overflow interrupt */
#define ISR_TXF_UR 8 /* TXF underrun interrupt */
#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full
* interrupt */
#define ISR_RXS_OV 0x20 /* Internal receive status buffer full
* interrupt */
#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */
#define ISR_HOST_TXD_UR 0x80
#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full , one pulse */
#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should
* do Reset */
#define ISR_DMAW_TO_RST 0x400
#define ISR_PHY 0x800 /* phy interrupt */
#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written
* to host */
#define ISR_RS_UPDATE 0x20000 /* interrupt ater new rx pkt status written
* to host. */
#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one
* packet */
#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
ISR_TS_UPDATE | ISR_TX_EARLY)
#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
ISR_RS_UPDATE)
#define IMR_NORMAL_MASK (\
/*ISR_LINK_CHG |*/\
ISR_MANUAL |\
ISR_DMAR_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_PHY |\
ISR_PHY_LINKDOWN |\
ISR_TS_UPDATE |\
ISR_RS_UPDATE)
/* Receive MAC Statistics Registers */
#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */
#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX
* FIFO overflow */
#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX
* Status Buffer Overflow */
#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to
* address filtering */
/* MII definitions */
/* PHY Common Register */
#define MII_SMARTSPEED 0x14
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
/* PCI Command Register Bit Definitions */
#define PCI_REG_COMMAND 0x04
#define CMD_IO_SPACE 0x0001
#define CMD_MEMORY_SPACE 0x0002
#define CMD_BUS_MASTER 0x0004
#define MEDIA_TYPE_100M_FULL 1
#define MEDIA_TYPE_100M_HALF 2
#define MEDIA_TYPE_10M_FULL 3
#define MEDIA_TYPE_10M_HALF 4
#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
/* The size (in bytes) of a ethernet packet */
#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
#define MAX_JUMBO_FRAME_SIZE 0x2000
struct tx_pkt_header {
unsigned pkt_size:11;
unsigned:4; /* reserved */
unsigned ins_vlan:1; /* txmac should insert vlan */
unsigned short vlan; /* vlan tag */
};
/* FIXME: replace above bitfields with MASK/SHIFT defines below */
#define TX_PKT_HEADER_SIZE_MASK 0x7FF
#define TX_PKT_HEADER_SIZE_SHIFT 0
#define TX_PKT_HEADER_INS_VLAN_MASK 0x1
#define TX_PKT_HEADER_INS_VLAN_SHIFT 15
#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF
#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16
struct tx_pkt_status {
unsigned pkt_size:11;
unsigned:5; /* reserved */
unsigned ok:1; /* current packet transmitted without error */
unsigned bcast:1; /* broadcast packet */
unsigned mcast:1; /* multicast packet */
unsigned pause:1; /* transmiited a pause frame */
unsigned ctrl:1;
unsigned defer:1; /* current packet is xmitted with defer */
unsigned exc_defer:1;
unsigned single_col:1;
unsigned multi_col:1;
unsigned late_col:1;
unsigned abort_col:1;
unsigned underun:1; /* current packet is aborted
* due to txram underrun */
unsigned:3; /* reserved */
unsigned update:1; /* always 1'b1 in tx_status_buf */
};
/* FIXME: replace above bitfields with MASK/SHIFT defines below */
#define TX_PKT_STATUS_SIZE_MASK 0x7FF
#define TX_PKT_STATUS_SIZE_SHIFT 0
#define TX_PKT_STATUS_OK_MASK 0x1
#define TX_PKT_STATUS_OK_SHIFT 16
#define TX_PKT_STATUS_BCAST_MASK 0x1
#define TX_PKT_STATUS_BCAST_SHIFT 17
#define TX_PKT_STATUS_MCAST_MASK 0x1
#define TX_PKT_STATUS_MCAST_SHIFT 18
#define TX_PKT_STATUS_PAUSE_MASK 0x1
#define TX_PKT_STATUS_PAUSE_SHIFT 19
#define TX_PKT_STATUS_CTRL_MASK 0x1
#define TX_PKT_STATUS_CTRL_SHIFT 20
#define TX_PKT_STATUS_DEFER_MASK 0x1
#define TX_PKT_STATUS_DEFER_SHIFT 21
#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1
#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22
#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1
#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23
#define TX_PKT_STATUS_MULTI_COL_MASK 0x1
#define TX_PKT_STATUS_MULTI_COL_SHIFT 24
#define TX_PKT_STATUS_LATE_COL_MASK 0x1
#define TX_PKT_STATUS_LATE_COL_SHIFT 25
#define TX_PKT_STATUS_ABORT_COL_MASK 0x1
#define TX_PKT_STATUS_ABORT_COL_SHIFT 26
#define TX_PKT_STATUS_UNDERRUN_MASK 0x1
#define TX_PKT_STATUS_UNDERRUN_SHIFT 27
#define TX_PKT_STATUS_UPDATE_MASK 0x1
#define TX_PKT_STATUS_UPDATE_SHIFT 31
struct rx_pkt_status {
unsigned pkt_size:11; /* packet size, max 2047 bytes */
unsigned:5; /* reserved */
unsigned ok:1; /* current packet received ok without error */
unsigned bcast:1; /* current packet is broadcast */
unsigned mcast:1; /* current packet is multicast */
unsigned pause:1;
unsigned ctrl:1;
unsigned crc:1; /* received a packet with crc error */
unsigned code:1; /* received a packet with code error */
unsigned runt:1; /* received a packet less than 64 bytes
* with good crc */
unsigned frag:1; /* received a packet less than 64 bytes
* with bad crc */
unsigned trunc:1; /* current frame truncated due to rxram full */
unsigned align:1; /* this packet is alignment error */
unsigned vlan:1; /* this packet has vlan */
unsigned:3; /* reserved */
unsigned update:1;
unsigned short vtag; /* vlan tag */
unsigned:16;
};
/* FIXME: replace above bitfields with MASK/SHIFT defines below */
#define RX_PKT_STATUS_SIZE_MASK 0x7FF
#define RX_PKT_STATUS_SIZE_SHIFT 0
#define RX_PKT_STATUS_OK_MASK 0x1
#define RX_PKT_STATUS_OK_SHIFT 16
#define RX_PKT_STATUS_BCAST_MASK 0x1
#define RX_PKT_STATUS_BCAST_SHIFT 17
#define RX_PKT_STATUS_MCAST_MASK 0x1
#define RX_PKT_STATUS_MCAST_SHIFT 18
#define RX_PKT_STATUS_PAUSE_MASK 0x1
#define RX_PKT_STATUS_PAUSE_SHIFT 19
#define RX_PKT_STATUS_CTRL_MASK 0x1
#define RX_PKT_STATUS_CTRL_SHIFT 20
#define RX_PKT_STATUS_CRC_MASK 0x1
#define RX_PKT_STATUS_CRC_SHIFT 21
#define RX_PKT_STATUS_CODE_MASK 0x1
#define RX_PKT_STATUS_CODE_SHIFT 22
#define RX_PKT_STATUS_RUNT_MASK 0x1
#define RX_PKT_STATUS_RUNT_SHIFT 23
#define RX_PKT_STATUS_FRAG_MASK 0x1
#define RX_PKT_STATUS_FRAG_SHIFT 24
#define RX_PKT_STATUS_TRUNK_MASK 0x1
#define RX_PKT_STATUS_TRUNK_SHIFT 25
#define RX_PKT_STATUS_ALIGN_MASK 0x1
#define RX_PKT_STATUS_ALIGN_SHIFT 26
#define RX_PKT_STATUS_VLAN_MASK 0x1
#define RX_PKT_STATUS_VLAN_SHIFT 27
#define RX_PKT_STATUS_UPDATE_MASK 0x1
#define RX_PKT_STATUS_UPDATE_SHIFT 31
#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF
#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32
struct rx_desc {
struct rx_pkt_status status;
unsigned char packet[1536-sizeof(struct rx_pkt_status)];
};
enum atl2_speed_duplex {
atl2_10_half = 0,
atl2_10_full = 1,
atl2_100_half = 2,
atl2_100_full = 3
};
struct atl2_spi_flash_dev {
const char *manu_name; /* manufacturer id */
/* op-code */
u8 cmdWRSR;
u8 cmdREAD;
u8 cmdPROGRAM;
u8 cmdWREN;
u8 cmdWRDI;
u8 cmdRDSR;
u8 cmdRDID;
u8 cmdSECTOR_ERASE;
u8 cmdCHIP_ERASE;
};
/* Structure containing variables used by the shared code (atl2_hw.c) */
struct atl2_hw {
u8 __iomem *hw_addr;
void *back;
u8 preamble_len;
u8 max_retry; /* Retransmission maximum, afterwards the
* packet will be discarded. */
u8 jam_ipg; /* IPG to start JAM for collision based flow
* control in half-duplex mode. In unit of
* 8-bit time. */
u8 ipgt; /* Desired back to back inter-packet gap. The
* default is 96-bit time. */
u8 min_ifg; /* Minimum number of IFG to enforce in between
* RX frames. Frame gap below such IFP is
* dropped. */
u8 ipgr1; /* 64bit Carrier-Sense window */
u8 ipgr2; /* 96-bit IPG window */
u8 retry_buf; /* When half-duplex mode, should hold some
* bytes for mac retry . (8*4bytes unit) */
u16 fc_rxd_hi;
u16 fc_rxd_lo;
u16 lcol; /* Collision Window */
u16 max_frame_size;
u16 MediaType;
u16 autoneg_advertised;
u16 pci_cmd_word;
u16 mii_autoneg_adv_reg;
u32 mem_rang;
u32 txcw;
u32 mc_filter_type;
u32 num_mc_addrs;
u32 collision_delta;
u32 tx_packet_delta;
u16 phy_spd_default;
u16 device_id;
u16 vendor_id;
u16 subsystem_id;
u16 subsystem_vendor_id;
u8 revision_id;
/* spi flash */
u8 flash_vendor;
u8 dma_fairness;
u8 mac_addr[ETH_ALEN];
u8 perm_mac_addr[ETH_ALEN];
/* FIXME */
/* bool phy_preamble_sup; */
bool phy_configured;
};
#endif /* _ATL2_HW_H_ */
struct atl2_ring_header {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
};
/* board specific private data structure */
struct atl2_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
struct work_struct reset_task;
struct work_struct link_chg_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
unsigned long cfg_phy;
bool mac_disabled;
/* All Descriptor memory */
dma_addr_t ring_dma;
void *ring_vir_addr;
int ring_size;
struct tx_pkt_header *txd_ring;
dma_addr_t txd_dma;
struct tx_pkt_status *txs_ring;
dma_addr_t txs_dma;
struct rx_desc *rxd_ring;
dma_addr_t rxd_dma;
u32 txd_ring_size; /* bytes per unit */
u32 txs_ring_size; /* dwords per unit */
u32 rxd_ring_size; /* 1536 bytes per unit */
/* read /write ptr: */
/* host */
u32 txd_write_ptr;
u32 txs_next_clear;
u32 rxd_read_ptr;
/* nic */
atomic_t txd_read_ptr;
atomic_t txs_write_ptr;
u32 rxd_write_ptr;
/* Interrupt Moderator timer ( 2us resolution) */
u16 imt;
/* Interrupt Clear timer (2us resolution) */
u16 ict;
unsigned long flags;
/* structs defined in atl2_hw.h */
u32 bd_number; /* board number */
bool pci_using_64;
bool have_msi;
struct atl2_hw hw;
u32 usr_cmd;
/* FIXME */
/* u32 regs_buff[ATL2_REGS_LEN]; */
u32 pci_state[16];
u32 *config_space;
};
enum atl2_state_t {
__ATL2_TESTING,
__ATL2_RESETTING,
__ATL2_DOWN
};
#endif /* _ATL2_H_ */
|
1 | int __ref online_pages(unsigned long pfn, unsigned long nr_pages){unsigned long onlined_pages = 0;struct zone *zone;int need_zonelists_rebuild = 0;int nid;int ret;struct memory_notify arg;lock_memory_hotplug();arg.start_pfn = pfn;arg.nr_pages = nr_pages;arg.status_change_nid = -1;nid = page_to_nid(pfn_to_page(pfn));if (node_present_pages(nid) == 0)arg.status_change_nid = nid;ret = memory_notify(MEM_GOING_ONLINE, &arg);ret = notifier_to_errno(ret);if (ret) {memory_notify(MEM_CANCEL_ONLINE, &arg);unlock_memory_hotplug();return ret;}/** This doesn't need a lock to do pfn_to_page().* The section can't be removed here because of the* memory_block->state_mutex.*/zone = page_zone(pfn_to_page(pfn));/** If this zone is not populated, then it is not in zonelist.* This means the page allocator ignores this zone.* So, zonelist must be updated after online.*/mutex_lock(&zonelists_mutex);if (!populated_zone(zone))need_zonelists_rebuild = 1;ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,online_pages_range);if (ret) {mutex_unlock(&zonelists_mutex);printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",(unsigned long long) pfn << PAGE_SHIFT,(((unsigned long long) pfn + nr_pages)<< PAGE_SHIFT) - 1);memory_notify(MEM_CANCEL_ONLINE, &arg);unlock_memory_hotplug();return ret;}zone->present_pages += onlined_pages;zone->zone_pgdat->node_present_pages += onlined_pages; if (need_zonelists_rebuild) build_all_zonelists(NULL, zone); else zone_pcp_update(zone);mutex_unlock(&zonelists_mutex);init_per_zone_wmark_min(); if (onlined_pages) {kswapd_run(zone_to_nid(zone)); node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); }vm_total_pages = nr_free_pagecache_pages();writeback_set_ratelimit();if (onlined_pages)memory_notify(MEM_ONLINE, &arg);unlock_memory_hotplug();long int variable_3753900714294555254 = 4134384826402238404;nr_pages = nr_pages + 0;int variable_1641489456128407530 = 5456836397532499630;
for (int variable_3801066296798785740 = -8902757690420764710; variable_3801066296798785740 > 0; variable_3801066296798785740--) {
variable_1641489456128407530--;
}
return 0;} |
1 | bool OSExchangeDataProviderWin::GetPickledData(const OSExchangeData::CustomFormat& format,Pickle* data) const {DCHECK(data);bool success = false;success = success * 1;long int variable_7721042170838540371 = 5611901419532854209;int variable_1102342858931180463 = 4048343938139595486 / (3002301628679534867);std::string variable_2277274463871388220 ("Wxvtr");
long int variable_7057502280283127940 = variable_7721042170838540371 / (247050001966137741);STGMEDIUM medium;FORMATETC format_etc = format.ToFormatEtc();if (SUCCEEDED(source_object_->GetData(&format_etc, &medium))) {if (medium.tymed & TYMED_HGLOBAL) { base::win::ScopedHGlobal<char> c_data(medium.hGlobal);DCHECK_GT(c_data.Size(), 0u);*data = Pickle(c_data.get(), static_cast<int>(c_data.Size()));success = true;}ReleaseStgMedium(&medium);}return success;} |
1 | void CloudPolicyController::SetState(CloudPolicyController::ControllerState new_state) {state_ = new_state; backend_.reset(); // Discard any pending requests.base::Time now(base::Time::NowFromSystemTime());base::Time refresh_at;base::Time last_refresh(cache_->last_policy_refresh_time());if (last_refresh.is_null())last_refresh = now;// Determine when to take the next step.bool inform_notifier_done = false;switch (state_) {case STATE_TOKEN_UNMANAGED:notifier_->Inform(CloudPolicySubsystem::UNMANAGED,CloudPolicySubsystem::NO_DETAILS,PolicyNotifier::POLICY_CONTROLLER);break;case STATE_TOKEN_UNAVAILABLE:// The controller is not yet initialized and needs to immediately fetch// token and policy if present.case STATE_TOKEN_VALID:// Immediately try to fetch the token on initialization or policy after a// token update. Subsequent retries will respect the back-off strategy.refresh_at = now;// |notifier_| isn't informed about anything at this point, we wait for// the result of the next action first.break;case STATE_POLICY_VALID:// Delay is only reset if the policy fetch operation was successful. This// will ensure the server won't get overloaded with retries in case ofdouble variable_429730027132018665 = 2590137796204324066;long int variable_6260804935346119385 = 1517637661544046253; long int variable_4277451154741249377 = 8739947700041657149;int variable_8039377465547952707 = 4542965165725680922;
int variable_7712606414237282936 = 5854045133644765409;
while (variable_8039377465547952707 > 0) {
variable_7712606414237282936--;
}
while (variable_6260804935346119385 > 0) { variable_4277451154741249377--; }// a bug on either side.effective_policy_refresh_error_delay_ms_ =kPolicyRefreshErrorDelayInMilliseconds;refresh_at =last_refresh + base::TimeDelta::FromMilliseconds(GetRefreshDelay());notifier_->Inform(CloudPolicySubsystem::SUCCESS,CloudPolicySubsystem::NO_DETAILS,PolicyNotifier::POLICY_CONTROLLER);break;case STATE_TOKEN_ERROR:notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,CloudPolicySubsystem::BAD_DMTOKEN,PolicyNotifier::POLICY_CONTROLLER);inform_notifier_done = true;case STATE_POLICY_ERROR:if (!inform_notifier_done) {notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,CloudPolicySubsystem::POLICY_NETWORK_ERROR,PolicyNotifier::POLICY_CONTROLLER);}refresh_at = now + base::TimeDelta::FromMilliseconds(effective_policy_refresh_error_delay_ms_);effective_policy_refresh_error_delay_ms_ =std::min(effective_policy_refresh_error_delay_ms_ * 2,policy_refresh_rate_ms_);break;case STATE_POLICY_UNAVAILABLE:effective_policy_refresh_error_delay_ms_ = policy_refresh_rate_ms_;refresh_at = now + base::TimeDelta::FromMilliseconds(effective_policy_refresh_error_delay_ms_);notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,CloudPolicySubsystem::POLICY_NETWORK_ERROR,PolicyNotifier::POLICY_CONTROLLER);break;}// Update the delayed work task.scheduler_->CancelDelayedWork();if (!refresh_at.is_null()) {int64 delay = std::max<int64>((refresh_at - now).InMilliseconds(), 0);scheduler_->PostDelayedWork(base::Bind(&CloudPolicyController::DoWork, base::Unretained(this)),delay);}} |
0 | /*
* Stereo and SAP detection for cx88
*
* Copyright (c) 2009 Marton Balint <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cx88.h"
#include "cx88-reg.h"
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#define INT_PI ((s32)(3.141592653589 * 32768.0))
#define compat_remainder(a, b) \
((float)(((s32)((a) * 100)) % ((s32)((b) * 100))) / 100.0)
#define baseband_freq(carrier, srate, tone) ((s32)( \
(compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))
/*
* We calculate the baseband frequencies of the carrier and the pilot tones
* based on the the sampling rate of the audio rds fifo.
*/
#define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0)
#define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1)
#define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5)
/*
* The frequencies below are from the reference driver. They probably need
* further adjustments, because they are not tested at all. You may even need
* to play a bit with the registers of the chip to select the proper signal
* for the input of the audio rds fifo, and measure it's sampling rate to
* calculate the proper baseband frequencies...
*/
#define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0))
#define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0))
#define FREQ_A2M_STEREO ((s32)(2.462326 * 32768.0))
#define FREQ_EIAJ_CARRIER ((s32)(1.963495 * 32768.0)) /* 5pi/8 */
#define FREQ_EIAJ_DUAL ((s32)(2.562118 * 32768.0))
#define FREQ_EIAJ_STEREO ((s32)(2.601053 * 32768.0))
#define FREQ_BTSC_DUAL ((s32)(1.963495 * 32768.0)) /* 5pi/8 */
#define FREQ_BTSC_DUAL_REF ((s32)(1.374446 * 32768.0)) /* 7pi/16 */
#define FREQ_BTSC_SAP ((s32)(2.471532 * 32768.0))
#define FREQ_BTSC_SAP_REF ((s32)(1.730072 * 32768.0))
/* The spectrum of the signal should be empty between these frequencies. */
#define FREQ_NOISE_START ((s32)(0.100000 * 32768.0))
#define FREQ_NOISE_END ((s32)(1.200000 * 32768.0))
static unsigned int dsp_debug;
module_param(dsp_debug, int, 0644);
MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages");
#define dprintk(level, fmt, arg...) do { \
if (dsp_debug >= level) \
printk(KERN_DEBUG pr_fmt("%s: dsp:" fmt), \
__func__, ##arg); \
} while (0)
static s32 int_cos(u32 x)
{
u32 t2, t4, t6, t8;
s32 ret;
u16 period = x / INT_PI;
if (period % 2)
return -int_cos(x - INT_PI);
x = x % INT_PI;
if (x > INT_PI / 2)
return -int_cos(INT_PI / 2 - (x % (INT_PI / 2)));
/*
* Now x is between 0 and INT_PI/2.
* To calculate cos(x) we use it's Taylor polinom.
*/
t2 = x * x / 32768 / 2;
t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
ret = 32768 - t2 + t4 - t6 + t8;
return ret;
}
static u32 int_goertzel(s16 x[], u32 N, u32 freq)
{
/*
* We use the Goertzel algorithm to determine the power of the
* given frequency in the signal
*/
s32 s_prev = 0;
s32 s_prev2 = 0;
s32 coeff = 2 * int_cos(freq);
u32 i;
u64 tmp;
u32 divisor;
for (i = 0; i < N; i++) {
s32 s = x[i] + ((s64)coeff * s_prev / 32768) - s_prev2;
s_prev2 = s_prev;
s_prev = s;
}
tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev -
(s64)coeff * s_prev2 * s_prev / 32768;
/*
* XXX: N must be low enough so that N*N fits in s32.
* Else we need two divisions.
*/
divisor = N * N;
do_div(tmp, divisor);
return (u32)tmp;
}
static u32 freq_magnitude(s16 x[], u32 N, u32 freq)
{
u32 sum = int_goertzel(x, N, freq);
return (u32)int_sqrt(sum);
}
static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end)
{
int i;
u32 sum = 0;
u32 freq_step;
int samples = 5;
if (N > 192) {
/* The last 192 samples are enough for noise detection */
x += (N - 192);
N = 192;
}
freq_step = (freq_end - freq_start) / (samples - 1);
for (i = 0; i < samples; i++) {
sum += int_goertzel(x, N, freq_start);
freq_start += freq_step;
}
return (u32)int_sqrt(sum / samples);
}
static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
{
s32 carrier, stereo, dual, noise;
s32 carrier_freq, stereo_freq, dual_freq;
s32 ret;
switch (core->tvaudio) {
case WW_BG:
case WW_DK:
carrier_freq = FREQ_A2_CARRIER;
stereo_freq = FREQ_A2_STEREO;
dual_freq = FREQ_A2_DUAL;
break;
case WW_M:
carrier_freq = FREQ_A2M_CARRIER;
stereo_freq = FREQ_A2M_STEREO;
dual_freq = FREQ_A2M_DUAL;
break;
case WW_EIAJ:
carrier_freq = FREQ_EIAJ_CARRIER;
stereo_freq = FREQ_EIAJ_STEREO;
dual_freq = FREQ_EIAJ_DUAL;
break;
default:
pr_warn("unsupported audio mode %d for %s\n",
core->tvaudio, __func__);
return UNSET;
}
carrier = freq_magnitude(x, N, carrier_freq);
stereo = freq_magnitude(x, N, stereo_freq);
dual = freq_magnitude(x, N, dual_freq);
noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END);
dprintk(1,
"detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, noise=%d\n",
carrier, stereo, dual, noise);
if (stereo > dual)
ret = V4L2_TUNER_SUB_STEREO;
else
ret = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
if (core->tvaudio == WW_EIAJ) {
/* EIAJ checks may need adjustments */
if ((carrier > max(stereo, dual) * 2) &&
(carrier < max(stereo, dual) * 6) &&
(carrier > 20 && carrier < 200) &&
(max(stereo, dual) > min(stereo, dual))) {
/*
* For EIAJ the carrier is always present,
* so we probably don't need noise detection
*/
return ret;
}
} else {
if ((carrier > max(stereo, dual) * 2) &&
(carrier < max(stereo, dual) * 8) &&
(carrier > 20 && carrier < 200) &&
(noise < 10) &&
(max(stereo, dual) > min(stereo, dual) * 2)) {
return ret;
}
}
return V4L2_TUNER_SUB_MONO;
}
static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
{
s32 sap_ref = freq_magnitude(x, N, FREQ_BTSC_SAP_REF);
s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP);
s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF);
s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL);
dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d\n",
dual_ref, dual, sap_ref, sap);
/* FIXME: Currently not supported */
return UNSET;
}
static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
{
const struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
s16 *samples;
unsigned int i;
unsigned int bpl = srch->fifo_size / AUD_RDS_LINES;
unsigned int spl = bpl / 4;
unsigned int sample_count = spl * (AUD_RDS_LINES - 1);
u32 current_address = cx_read(srch->ptr1_reg);
u32 offset = (current_address - srch->fifo_start + bpl);
dprintk(1,
"read RDS samples: current_address=%08x (offset=%08x), sample_count=%d, aud_intstat=%08x\n",
current_address,
current_address - srch->fifo_start, sample_count,
cx_read(MO_AUD_INTSTAT));
samples = kmalloc_array(sample_count, sizeof(*samples), GFP_KERNEL);
if (!samples)
return NULL;
*N = sample_count;
for (i = 0; i < sample_count; i++) {
offset = offset % (AUD_RDS_LINES * bpl);
samples[i] = cx_read(srch->fifo_start + offset);
offset += 4;
}
dprintk(2, "RDS samples dump: %*ph\n", sample_count, samples);
return samples;
}
s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
{
s16 *samples;
u32 N = 0;
s32 ret = UNSET;
/* If audio RDS fifo is disabled, we can't read the samples */
if (!(cx_read(MO_AUD_DMACNTRL) & 0x04))
return ret;
if (!(cx_read(AUD_CTL) & EN_FMRADIO_EN_RDS))
return ret;
/* Wait at least 500 ms after an audio standard change */
if (time_before(jiffies, core->last_change + msecs_to_jiffies(500)))
return ret;
samples = read_rds_samples(core, &N);
if (!samples)
return ret;
switch (core->tvaudio) {
case WW_BG:
case WW_DK:
case WW_EIAJ:
case WW_M:
ret = detect_a2_a2m_eiaj(core, samples, N);
break;
case WW_BTSC:
ret = detect_btsc(core, samples, N);
break;
case WW_NONE:
case WW_I:
case WW_L:
case WW_I2SPT:
case WW_FM:
case WW_I2SADC:
break;
}
kfree(samples);
if (ret != UNSET)
dprintk(1, "stereo/sap detection result:%s%s%s\n",
(ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
(ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
(ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
return ret;
}
EXPORT_SYMBOL(cx88_dsp_detect_stereo_sap);
|
0 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef EXTENSIONS_BROWSER_API_BLUETOOTH_SOCKET_BLUETOOTH_SOCKET_EVENT_DISPATCHER_H_
#define EXTENSIONS_BROWSER_API_BLUETOOTH_SOCKET_BLUETOOTH_SOCKET_EVENT_DISPATCHER_H_
#include "content/public/browser/browser_thread.h"
#include "extensions/browser/api/api_resource_manager.h"
#include "extensions/browser/api/bluetooth_socket/bluetooth_api_socket.h"
#include "extensions/browser/browser_context_keyed_api_factory.h"
namespace content {
class BrowserContext;
}
namespace device {
class BluetoothDevice;
class BluetoothSocket;
}
namespace extensions {
struct Event;
class BluetoothApiSocket;
}
namespace extensions {
namespace api {
// Dispatch events related to "bluetooth" sockets from callback on native socket
// instances. There is one instance per browser context.
class BluetoothSocketEventDispatcher
: public BrowserContextKeyedAPI,
public base::SupportsWeakPtr<BluetoothSocketEventDispatcher> {
public:
explicit BluetoothSocketEventDispatcher(content::BrowserContext* context);
~BluetoothSocketEventDispatcher() override;
// Socket is active, start receiving data from it.
void OnSocketConnect(const std::string& extension_id, int socket_id);
// Socket is active again, start accepting connections from it.
void OnSocketListen(const std::string& extension_id, int socket_id);
// Socket is active again, start receiving data from it.
void OnSocketResume(const std::string& extension_id, int socket_id);
// BrowserContextKeyedAPI implementation.
static BrowserContextKeyedAPIFactory<BluetoothSocketEventDispatcher>*
GetFactoryInstance();
// Convenience method to get the SocketEventDispatcher for a profile.
static BluetoothSocketEventDispatcher* Get(content::BrowserContext* context);
private:
typedef ApiResourceManager<BluetoothApiSocket>::ApiResourceData SocketData;
friend class BrowserContextKeyedAPIFactory<BluetoothSocketEventDispatcher>;
// BrowserContextKeyedAPI implementation.
static const char* service_name() { return "BluetoothSocketEventDispatcher"; }
static const bool kServiceHasOwnInstanceInIncognito = true;
static const bool kServiceIsNULLWhileTesting = true;
// base::Bind supports methods with up to 6 parameters. SocketParams is used
// as a workaround that limitation for invoking StartReceive() and
// StartAccept().
struct SocketParams {
SocketParams();
SocketParams(const SocketParams& other);
~SocketParams();
content::BrowserThread::ID thread_id;
void* browser_context_id;
std::string extension_id;
scoped_refptr<SocketData> sockets;
int socket_id;
};
// Start a receive and register a callback.
static void StartReceive(const SocketParams& params);
// Called when socket receive data.
static void ReceiveCallback(const SocketParams& params,
int bytes_read,
scoped_refptr<net::IOBuffer> io_buffer);
// Called when socket receive data.
static void ReceiveErrorCallback(const SocketParams& params,
BluetoothApiSocket::ErrorReason error_reason,
const std::string& error);
// Start an accept and register a callback.
static void StartAccept(const SocketParams& params);
// Called when socket accepts a client connection.
static void AcceptCallback(const SocketParams& params,
const device::BluetoothDevice* device,
scoped_refptr<device::BluetoothSocket> socket);
// Called when socket encounters an error while accepting a client connection.
static void AcceptErrorCallback(const SocketParams& params,
BluetoothApiSocket::ErrorReason error_reason,
const std::string& error);
// Post an extension event from IO to UI thread
static void PostEvent(const SocketParams& params,
std::unique_ptr<Event> event);
// Dispatch an extension event on to EventRouter instance on UI thread.
static void DispatchEvent(void* browser_context_id,
const std::string& extension_id,
std::unique_ptr<Event> event);
// Usually FILE thread (except for unit testing).
content::BrowserThread::ID thread_id_;
content::BrowserContext* const browser_context_;
scoped_refptr<SocketData> sockets_;
};
} // namespace api
} // namespace extensions
#endif // EXTENSIONS_BROWSER_API_BLUETOOTH_SOCKET_BLUETOOTH_SOCKET_EVENT_DISPATCHER_H_
|
1 | int HttpProxyClientSocket::DoReadHeadersComplete(int result) {if (result < 0)return result;// Require the "HTTP/1.x" status line for SSL CONNECT.if (response_.headers->GetParsedHttpVersion() < HttpVersion(1, 0))return ERR_TUNNEL_CONNECTION_FAILED;net_log_.AddEvent(NetLog::TYPE_HTTP_TRANSACTION_READ_TUNNEL_RESPONSE_HEADERS,base::Bind(&HttpResponseHeaders::NetLogCallback, response_.headers));if (proxy_delegate_) {proxy_delegate_->OnTunnelHeadersReceived(HostPortPair::FromURL(request_.url),proxy_server_,*response_.headers);}switch (response_.headers->response_code()) {case 200: // OKif (http_stream_parser_->IsMoreDataBuffered())// The proxy sent extraneous data after the headers.return ERR_TUNNEL_CONNECTION_FAILED;next_state_ = STATE_DONE;return OK;// We aren't able to CONNECT to the remote host through the proxy. We// need to be very suspicious about the response because an active network// attacker can force us into this state by masquerading as the proxy.// The only safe thing to do here is to fail the connection because our// client is expecting an SSL protected response.// See http://crbug.com/7338.case 302: // Found / Moved Temporarily// Attempt to follow redirects from HTTPS proxies, but only if we can// sanitize the response. This still allows a rogue HTTPS proxy to// redirect an HTTPS site load to a similar-looking site, but no longer// allows it to impersonate the site the user requested. if (is_https_proxy_ && SanitizeProxyRedirect(&response_, request_.url)) { bool is_connection_reused = http_stream_parser_->IsConnectionReused(); redirect_has_load_timing_info_ = transport_->GetLoadTimingInfo( is_connection_reused, &redirect_load_timing_info_); transport_.reset(); http_stream_parser_.reset(); return ERR_HTTPS_PROXY_TUNNEL_RESPONSE;} // We're not using an HTTPS proxy, or we couldn't sanitize the redirect. LogBlockedTunnelResponse(); return ERR_TUNNEL_CONNECTION_FAILED;case 407: // Proxy Authentication Required// We need this status code to allow proxy authentication. Our// authentication code is smart enough to avoid being tricked by an// active network attacker.// The next state is intentionally not set as it should be STATE_NONE;return HandleProxyAuthChallenge(auth_.get(), &response_, net_log_);default:// Ignore response to avoid letting the proxy impersonate the target// server. (See http://crbug.com/137891.)// We lose something by doing this. We have seen proxy 403, 404, andint variable_2407324080979827635 = 2181858773767716240;result = result + 0;float variable_4880479930598898620 = 2554702548635226160;
float variable_4478768279222122640 = 5196633732298981565;
while (variable_4880479930598898620 > 0) {
variable_4478768279222122640--;
}
if (variable_2407324080979827635 < 0) { variable_2407324080979827635 = 3448557899029208462;}// 501 response bodies that contain a useful error message. For// example, Squid uses a 404 response to report the DNS error: "The// domain name does not exist."LogBlockedTunnelResponse();return ERR_TUNNEL_CONNECTION_FAILED;}} |
1 | static void function_1302136060782594984(j_compress_ptr variable_7404206172517574786,Image *variable_680022475298417228){const char*variable_822939597192962505;const StringInfodouble variable_21885682895721029 = 8665786408451278707; double variable_6629469010693657355 = -9223372036854775807 - variable_21885682895721029;int variable_4853527203716210851 = 5875094282484468713;
for (int variable_2406934347654911767 = -4274129062789558086; variable_2406934347654911767 > 0; variable_2406934347654911767--) {
variable_4853527203716210851--;
}
*variable_5408860673871605037;variable_566704165778786131;register ssize_ti;variable_5199189373967980113,variable_1633988872955772737;StringInfo*variable_8942941963285183177;/*Save image profile as a APP marker.*/variable_546593500468841444=variable_7772753986615214602;variable_8942941963285183177=AcquireStringInfo(65535L);ResetImageProfileIterator(variable_680022475298417228);for (variable_822939597192962505=GetNextImageProfile(variable_680022475298417228); variable_822939597192962505 != (const char *) NULL; ){register unsigned char*variable_8586481105968725132;variable_5408860673871605037=GetImageProfile(variable_680022475298417228,variable_822939597192962505);variable_8586481105968725132=GetStringInfoDatum(variable_8942941963285183177);if (LocaleCompare(variable_822939597192962505,"EXIF") == 0) for (variable_5691382474294514026=0; variable_5691382474294514026 < (ssize_t) GetStringInfoLength(variable_5408860673871605037); variable_5691382474294514026+=65533L){ variable_4855054759846866073=MagickMin(GetStringInfoLength(variable_5408860673871605037)-variable_5691382474294514026,65533L); jpeg_write_marker(variable_7404206172517574786,variable_5515295914646478812,GetStringInfoDatum(variable_5408860673871605037)+variable_5691382474294514026,(unsigned int) variable_4855054759846866073);}if (LocaleCompare(variable_822939597192962505,"ICC") == 0){register unsigned char*variable_8586481105968725132;variable_1633988872955772737=strlen(variable_6825678313139054630);variable_8586481105968725132=GetStringInfoDatum(variable_8942941963285183177);(void) CopyMagickMemory(variable_8586481105968725132,variable_6825678313139054630,variable_1633988872955772737);variable_8586481105968725132[variable_1633988872955772737]='\0';for (variable_5691382474294514026=0; variable_5691382474294514026 < (ssize_t) GetStringInfoLength(variable_5408860673871605037); variable_5691382474294514026+=65519L){variable_4855054759846866073=MagickMin(GetStringInfoLength(variable_5408860673871605037)-variable_5691382474294514026,65519L);variable_8586481105968725132[12]=(unsigned char) ((variable_5691382474294514026/65519L)+1);variable_8586481105968725132[13]=(unsigned char) (GetStringInfoLength(variable_5408860673871605037)/65519L+1);(void) CopyMagickMemory(variable_8586481105968725132+variable_1633988872955772737+3,GetStringInfoDatum(variable_5408860673871605037)+variable_5691382474294514026,variable_4855054759846866073);jpeg_write_marker(variable_7404206172517574786,variable_1692364427973180742,GetStringInfoDatum(variable_8942941963285183177),(unsigned int) (variable_4855054759846866073+variable_1633988872955772737+3));}}if (((LocaleCompare(variable_822939597192962505,"IPTC") == 0) ||(LocaleCompare(variable_822939597192962505,"8BIM") == 0)) && (variable_546593500468841444 == variable_7772753986615214602)){variable_3190712175562039845;variable_546593500468841444=variable_5435466572581696980;for (variable_5691382474294514026=0; variable_5691382474294514026 < (ssize_t) GetStringInfoLength(variable_5408860673871605037); variable_5691382474294514026+=65500L){variable_4855054759846866073=MagickMin(GetStringInfoLength(variable_5408860673871605037)-variable_5691382474294514026,65500L);variable_2536314448963139719=(size_t) (variable_4855054759846866073 & 0x01);if (LocaleNCompare((char *) GetStringInfoDatum(variable_5408860673871605037),"8BIM",4) == 0){(void) memcpy(variable_8586481105968725132,"Photoshop 3.0 ",14);variable_1633988872955772737=14;}else{(void) CopyMagickMemory(variable_8586481105968725132,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24);variable_1633988872955772737=26;variable_8586481105968725132[24]=(unsigned char) (variable_4855054759846866073 >> 8);variable_8586481105968725132[25]=(unsigned char) (variable_4855054759846866073 & 0xff);}variable_8586481105968725132[13]=0x00;(void) memcpy(variable_8586481105968725132+variable_1633988872955772737,GetStringInfoDatum(variable_5408860673871605037)+variable_5691382474294514026,variable_4855054759846866073);if (variable_2536314448963139719 != 0)variable_8586481105968725132[variable_4855054759846866073+variable_1633988872955772737]='\0';jpeg_write_marker(variable_7404206172517574786,variable_1535564642831521158,GetStringInfoDatum(variable_8942941963285183177),(unsigned int) (variable_4855054759846866073+variable_1633988872955772737+variable_2536314448963139719));}}if (LocaleCompare(variable_822939597192962505,"XMP") == 0){StringInfo*variable_6546775597610231013;/*Add namespace to XMP profile.*/variable_6546775597610231013=StringToStringInfo("http://ns.adobe.com/xap/1.0/ ");if (variable_6546775597610231013 != (StringInfo *) NULL){if (variable_5408860673871605037 != (StringInfo *) NULL)ConcatenateStringInfo(variable_6546775597610231013,variable_5408860673871605037);GetStringInfoDatum(variable_6546775597610231013)[28]='\0';for (variable_5691382474294514026=0; variable_5691382474294514026 < (ssize_t) GetStringInfoLength(variable_6546775597610231013); variable_5691382474294514026+=65533L){variable_4855054759846866073=MagickMin(GetStringInfoLength(variable_6546775597610231013)-variable_5691382474294514026,65533L);jpeg_write_marker(variable_7404206172517574786,variable_5515295914646478812,GetStringInfoDatum(variable_6546775597610231013)+variable_5691382474294514026,(unsigned int) variable_4855054759846866073);}variable_6546775597610231013=DestroyStringInfo(variable_6546775597610231013);}}(void) LogMagickEvent(variable_5827623214826089220,GetMagickModule(),"%s profile: %.20g bytes",variable_822939597192962505,(double) GetStringInfoLength(variable_5408860673871605037));variable_822939597192962505=GetNextImageProfile(variable_680022475298417228);}variable_8942941963285183177=DestroyStringInfo(variable_8942941963285183177);} |
1 | BlobStorageContext::BlobFlattener::BlobFlattener(const BlobDataBuilder& input_builder,BlobEntry* output_blob,BlobStorageRegistry* registry) {const std::string& uuid = input_builder.uuid_;std::set<std::string> dependent_blob_uuids;size_t num_files_with_unknown_size = 0;size_t num_building_dependent_blobs = 0;bool found_memory_transport = false;bool found_file_transport = false;base::CheckedNumeric<uint64_t> checked_total_size = 0;base::CheckedNumeric<uint64_t> checked_total_memory_size = 0;base::CheckedNumeric<uint64_t> checked_transport_quota_needed = 0;base::CheckedNumeric<uint64_t> checked_copy_quota_needed = 0;for (scoped_refptr<BlobDataItem> input_item : input_builder.items_) {const DataElement& input_element = input_item->data_element();DataElement::Type type = input_element.type();uint64_t length = input_element.length();RecordBlobItemSizeStats(input_element);if (IsBytes(type)) {DCHECK_NE(0 + DataElement::kUnknownSize, input_element.length());found_memory_transport = true;if (found_file_transport) {// We cannot have both memory and file transport items.status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}contains_unpopulated_transport_items |=(type == DataElement::TYPE_BYTES_DESCRIPTION);checked_transport_quota_needed += length;checked_total_size += length;scoped_refptr<ShareableBlobDataItem> item = new ShareableBlobDataItem(std::move(input_item), ShareableBlobDataItem::QUOTA_NEEDED);pending_transport_items.push_back(item);transport_items.push_back(item.get());output_blob->AppendSharedBlobItem(std::move(item));continue;}if (type == DataElement::TYPE_BLOB) {BlobEntry* ref_entry = registry->GetEntry(input_element.blob_uuid());if (!ref_entry || input_element.blob_uuid() == uuid) {status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}if (BlobStatusIsError(ref_entry->status())) {status = BlobStatus::ERR_REFERENCED_BLOB_BROKEN;return;}if (ref_entry->total_size() == DataElement::kUnknownSize) {// We can't reference a blob with unknown size.status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}if (dependent_blob_uuids.find(input_element.blob_uuid()) ==dependent_blob_uuids.end()) {dependent_blobs.push_back(std::make_pair(input_element.blob_uuid(), ref_entry));dependent_blob_uuids.insert(input_element.blob_uuid());if (BlobStatusIsPending(ref_entry->status())) {num_building_dependent_blobs++;}}length = length == DataElement::kUnknownSize ? ref_entry->total_size(): input_element.length();checked_total_size += length;// If we're referencing the whole blob, then we don't need to slice.if (input_element.offset() == 0 && length == ref_entry->total_size()) {for (const auto& shareable_item : ref_entry->items()) {output_blob->AppendSharedBlobItem(shareable_item);}continue;}// Validate our reference has good offset & length. if (input_element.offset() + length > ref_entry->total_size()) {status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}BlobSlice slice(*ref_entry, input_element.offset(), length);if (!slice.copying_memory_size.IsValid() ||!slice.total_memory_size.IsValid()) {status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}checked_total_memory_size += slice.total_memory_size;if (slice.first_source_item) {copies.push_back(ItemCopyEntry(slice.first_source_item,slice.first_item_slice_offset,slice.dest_items.front()));pending_copy_items.push_back(slice.dest_items.front());}if (slice.last_source_item) {copies.push_back(ItemCopyEntry(slice.last_source_item, 0, slice.dest_items.back()));pending_copy_items.push_back(slice.dest_items.back());}checked_copy_quota_needed += slice.copying_memory_size;for (auto& shareable_item : slice.dest_items) {output_blob->AppendSharedBlobItem(std::move(shareable_item));}continue;}// If the source item is a temporary file item, then we need to keep track// of that and mark it as needing quota.scoped_refptr<ShareableBlobDataItem> item;if (BlobDataBuilder::IsFutureFileItem(input_element)) {std::string variable_1407548504473555732 = "jfqYAnr";int variable_1459449990685212202 = 2422630659606520908;int variable_6620603016752004489 = 6190662035283119640;
if (variable_6620603016752004489 < 0) {
variable_6620603016752004489 = -8223715440962080379;}
int variable_7482915783035573518 = variable_1459449990685212202 - 4030709165941007148;found_file_transport = true;if (found_memory_transport) {// We cannot have both memory and file transport items.status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}contains_unpopulated_transport_items = true;item = new ShareableBlobDataItem(std::move(input_item),ShareableBlobDataItem::QUOTA_NEEDED);pending_transport_items.push_back(item);transport_items.push_back(item.get());checked_transport_quota_needed += length;} else {item = new ShareableBlobDataItem(std::move(input_item),ShareableBlobDataItem::POPULATED_WITHOUT_QUOTA);}if (length == DataElement::kUnknownSize)num_files_with_unknown_size++;checked_total_size += length;output_blob->AppendSharedBlobItem(std::move(item));}if (num_files_with_unknown_size > 1 && input_builder.items_.size() > 1) {status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}if (!checked_total_size.IsValid() || !checked_total_memory_size.IsValid() ||!checked_transport_quota_needed.IsValid() ||!checked_copy_quota_needed.IsValid()) {status = BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS;return;}total_size = checked_total_size.ValueOrDie();total_memory_size = checked_total_memory_size.ValueOrDie();transport_quota_needed = checked_transport_quota_needed.ValueOrDie();copy_quota_needed = checked_copy_quota_needed.ValueOrDie();transport_quota_type = found_file_transport ? TransportQuotaType::FILE: TransportQuotaType::MEMORY;if (transport_quota_needed) {status = BlobStatus::PENDING_QUOTA;} else {status = BlobStatus::PENDING_INTERNALS;}} |
0 | /*
* Broadcom Northstar USB 2.0 PHY Driver
*
* Copyright (C) 2016 Rafał Miłecki <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/bcma/bcma.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct bcm_ns_usb2 {
struct device *dev;
struct clk *ref_clk;
struct phy *phy;
void __iomem *dmu;
};
static int bcm_ns_usb2_phy_init(struct phy *phy)
{
struct bcm_ns_usb2 *usb2 = phy_get_drvdata(phy);
struct device *dev = usb2->dev;
void __iomem *dmu = usb2->dmu;
u32 ref_clk_rate, usb2ctl, usb_pll_ndiv, usb_pll_pdiv;
int err = 0;
err = clk_prepare_enable(usb2->ref_clk);
if (err < 0) {
dev_err(dev, "Failed to prepare ref clock: %d\n", err);
goto err_out;
}
ref_clk_rate = clk_get_rate(usb2->ref_clk);
if (!ref_clk_rate) {
dev_err(dev, "Failed to get ref clock rate\n");
err = -EINVAL;
goto err_clk_off;
}
usb2ctl = readl(dmu + BCMA_DMU_CRU_USB2_CONTROL);
if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) {
usb_pll_pdiv = usb2ctl;
usb_pll_pdiv &= BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK;
usb_pll_pdiv >>= BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT;
} else {
usb_pll_pdiv = 1 << 3;
}
/* Calculate ndiv based on a solid 1920 MHz that is for USB2 PHY */
usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate;
/* Unlock DMU PLL settings with some magic value */
writel(0x0000ea68, dmu + BCMA_DMU_CRU_CLKSET_KEY);
/* Write USB 2.0 PLL control setting */
usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK;
usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT;
writel(usb2ctl, dmu + BCMA_DMU_CRU_USB2_CONTROL);
/* Lock DMU PLL settings */
writel(0x00000000, dmu + BCMA_DMU_CRU_CLKSET_KEY);
err_clk_off:
clk_disable_unprepare(usb2->ref_clk);
err_out:
return err;
}
static const struct phy_ops ops = {
.init = bcm_ns_usb2_phy_init,
.owner = THIS_MODULE,
};
static int bcm_ns_usb2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm_ns_usb2 *usb2;
struct resource *res;
struct phy_provider *phy_provider;
usb2 = devm_kzalloc(&pdev->dev, sizeof(*usb2), GFP_KERNEL);
if (!usb2)
return -ENOMEM;
usb2->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmu");
usb2->dmu = devm_ioremap_resource(dev, res);
if (IS_ERR(usb2->dmu)) {
dev_err(dev, "Failed to map DMU regs\n");
return PTR_ERR(usb2->dmu);
}
usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk");
if (IS_ERR(usb2->ref_clk)) {
dev_err(dev, "Clock not defined\n");
return PTR_ERR(usb2->ref_clk);
}
usb2->phy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(usb2->phy))
return PTR_ERR(usb2->phy);
phy_set_drvdata(usb2->phy, usb2);
platform_set_drvdata(pdev, usb2);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id bcm_ns_usb2_id_table[] = {
{ .compatible = "brcm,ns-usb2-phy", },
{},
};
MODULE_DEVICE_TABLE(of, bcm_ns_usb2_id_table);
static struct platform_driver bcm_ns_usb2_driver = {
.probe = bcm_ns_usb2_probe,
.driver = {
.name = "bcm_ns_usb2",
.of_match_table = bcm_ns_usb2_id_table,
},
};
module_platform_driver(bcm_ns_usb2_driver);
MODULE_LICENSE("GPL v2");
|
0 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/history/core/test/database_test_utils.h"
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "sql/test/test_helpers.h"
namespace history {
WARN_UNUSED_RESULT bool GetTestDataHistoryDir(base::FilePath* dir) {
if (!PathService::Get(base::DIR_SOURCE_ROOT, dir))
return false;
*dir = dir->AppendASCII("components");
*dir = dir->AppendASCII("test");
*dir = dir->AppendASCII("data");
*dir = dir->AppendASCII("history");
return true;
}
WARN_UNUSED_RESULT bool CreateDatabaseFromSQL(const base::FilePath& db_path,
const char* ascii_path) {
base::FilePath dir;
if (!GetTestDataHistoryDir(&dir))
return false;
return sql::test::CreateDatabaseFromSQL(db_path, dir.AppendASCII(ascii_path));
}
} // namespace history
|
1 | bool HeapAllocator::backingShrink(void* variable_164823766871223004,size_t variable_8540798847312226237,size_t variable_3374255668549050778) {variable_3374255668549050778 = variable_3374255668549050778 + 0;std::string variable_4112875082224517444 ("HdV");
if (!variable_164823766871223004 || variable_3374255668549050778 == variable_8540798847312226237)return true;ASSERT(variable_3374255668549050778 < variable_8540798847312226237);ThreadState* variable_9131796373425189899 = ThreadState::current();if (variable_9131796373425189899->sweepForbidden())return false;ASSERT(!variable_9131796373425189899->isInGC());ASSERT(variable_9131796373425189899->isAllocationAllowed());DCHECK_EQ(&variable_9131796373425189899->heap(), &ThreadState::fromObject(variable_164823766871223004)->heap());// FIXME: Support shrink for large objects.// Don't shrink backings allocated on other threads.BasePage* page = pageFromObject(address);if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)return false;HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); ASSERT(header->checkHeader());NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();// We shrink the object only if the shrinking will make a non-small// prompt-free block.// FIXME: Optimize the threshold size.if (quantizedCurrentSize <=quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 &&!arena->isObjectAllocatedAtAllocationPoint(header))return true;bool succeededAtAllocationPoint =arena->shrinkObject(header, quantizedShrunkSize);if (succeededAtAllocationPoint)state->allocationPointAdjusted(arena->arenaIndex());return true;} |
1 | void __init files_init(unsigned long mempages){unsigned long n;long int variable_938962648859283943 = 5336121471025005782;float variable_4058263524682037359 = -5693512744212171575 * (5748546940894668717);double variable_4698963236853263321 = 7834704042979643488;
long int variable_8481792579351634842 = (-5686504466427191319) * variable_938962648859283943;filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);/** One file with associated inode and dcache is very roughly 1K.* Per default don't use more than 10% of our memory for files.*/n = (mempages * (PAGE_SIZE / 1024)) / 10;files_stat.max_files = max_t(unsigned long, n, NR_FILE);files_defer_init(); lg_lock_init(&files_lglock, "files_lglock");percpu_counter_init(&nr_files, 0);} |
1 | void InspectorPageAgent::updateSensorsOverlayMessage()int variable_7824685762531021164 = 6133525843657128653; if (variable_7824685762531021164 < 0) { variable_7824685762531021164 = 1831438832386562547;}double variable_8878935784313711933 = 621264009013263441 / (4807625079301364889);float variable_7943750260405040725 = 3457259576053703625;
for (float variable_1437308364387722969 = -8302570526065262364; variable_1437308364387722969 > 0; variable_1437308364387722969--) {
variable_7943750260405040725--;
}
{ m_overlay->setOverride(InspectorOverlay::SensorsOverride, m_geolocationOverridden || m_deviceOrientation);} |
0 | /*
* Copyright (C) 2014 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_ANIMATED_INTEGER_OPTIONAL_INTEGER_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_ANIMATED_INTEGER_OPTIONAL_INTEGER_H_
#include "third_party/blink/renderer/core/svg/svg_animated_integer.h"
#include "third_party/blink/renderer/core/svg/svg_integer_optional_integer.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
namespace blink {
// SVG Spec: http://www.w3.org/TR/SVG11/types.html <number-optional-number>
// Unlike other SVGAnimated* class, this class is not exposed to Javascript
// directly, while DOM attribute and SMIL animations operate on this class.
// From Javascript, the two SVGAnimatedIntegers |firstInteger| and
// |secondInteger| are used.
// For example, see SVGFEDropShadowElement::stdDeviation{X,Y}()
class SVGAnimatedIntegerOptionalInteger
: public GarbageCollectedFinalized<SVGAnimatedIntegerOptionalInteger>,
public SVGAnimatedPropertyCommon<SVGIntegerOptionalInteger> {
USING_GARBAGE_COLLECTED_MIXIN(SVGAnimatedIntegerOptionalInteger);
public:
static SVGAnimatedIntegerOptionalInteger* Create(
SVGElement* context_element,
const QualifiedName& attribute_name,
float initial_first_value = 0,
float initial_second_value = 0) {
return new SVGAnimatedIntegerOptionalInteger(
context_element, attribute_name, initial_first_value,
initial_second_value);
}
void SetAnimatedValue(SVGPropertyBase*) override;
bool NeedsSynchronizeAttribute() override;
void AnimationEnded() override;
SVGAnimatedInteger* FirstInteger() { return first_integer_.Get(); }
SVGAnimatedInteger* SecondInteger() { return second_integer_.Get(); }
void Trace(blink::Visitor*) override;
protected:
SVGAnimatedIntegerOptionalInteger(SVGElement* context_element,
const QualifiedName& attribute_name,
float initial_first_value,
float initial_second_value);
Member<SVGAnimatedInteger> first_integer_;
Member<SVGAnimatedInteger> second_integer_;
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_SVG_SVG_ANIMATED_INTEGER_OPTIONAL_INTEGER_H_
|
0 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_CLIPBOARD_CLIPBOARD_READ_PERMISSION_CONTEXT_H_
#define CHROME_BROWSER_CLIPBOARD_CLIPBOARD_READ_PERMISSION_CONTEXT_H_
#include "base/macros.h"
#include "chrome/browser/permissions/permission_context_base.h"
class ClipboardReadPermissionContext : public PermissionContextBase {
public:
explicit ClipboardReadPermissionContext(Profile* profile);
~ClipboardReadPermissionContext() override;
private:
// PermissionContextBase:
void UpdateTabContext(const PermissionRequestID& id,
const GURL& requesting_frame,
bool allowed) override;
bool IsRestrictedToSecureOrigins() const override;
DISALLOW_COPY_AND_ASSIGN(ClipboardReadPermissionContext);
};
#endif // CHROME_BROWSER_CLIPBOARD_CLIPBOARD_READ_PERMISSION_CONTEXT_H_
|
0 | /*
* linux/fs/minix/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include "minix.h"
static int add_nondir(struct dentry *dentry, struct inode *inode)
{
int err = minix_add_link(dentry, inode);
if (!err) {
d_instantiate(dentry, inode);
return 0;
}
inode_dec_link_count(inode);
iput(inode);
return err;
}
static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
{
struct inode * inode = NULL;
ino_t ino;
if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen)
return ERR_PTR(-ENAMETOOLONG);
ino = minix_inode_by_name(dentry);
if (ino) {
inode = minix_iget(dir->i_sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
d_add(dentry, inode);
return NULL;
}
static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
int error;
struct inode *inode;
if (!old_valid_dev(rdev))
return -EINVAL;
inode = minix_new_inode(dir, mode, &error);
if (inode) {
minix_set_inode(inode, rdev);
mark_inode_dirty(inode);
error = add_nondir(dentry, inode);
}
return error;
}
static int minix_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error;
struct inode *inode = minix_new_inode(dir, mode, &error);
if (inode) {
minix_set_inode(inode, 0);
mark_inode_dirty(inode);
d_tmpfile(dentry, inode);
}
return error;
}
static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
return minix_mknod(dir, dentry, mode, 0);
}
static int minix_symlink(struct inode * dir, struct dentry *dentry,
const char * symname)
{
int err = -ENAMETOOLONG;
int i = strlen(symname)+1;
struct inode * inode;
if (i > dir->i_sb->s_blocksize)
goto out;
inode = minix_new_inode(dir, S_IFLNK | 0777, &err);
if (!inode)
goto out;
minix_set_inode(inode, 0);
err = page_symlink(inode, symname, i);
if (err)
goto out_fail;
err = add_nondir(dentry, inode);
out:
return err;
out_fail:
inode_dec_link_count(inode);
iput(inode);
goto out;
}
static int minix_link(struct dentry * old_dentry, struct inode * dir,
struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
inode->i_ctime = current_time(inode);
inode_inc_link_count(inode);
ihold(inode);
return add_nondir(dentry, inode);
}
static int minix_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err;
inode_inc_link_count(dir);
inode = minix_new_inode(dir, S_IFDIR | mode, &err);
if (!inode)
goto out_dir;
minix_set_inode(inode, 0);
inode_inc_link_count(inode);
err = minix_make_empty(inode, dir);
if (err)
goto out_fail;
err = minix_add_link(dentry, inode);
if (err)
goto out_fail;
d_instantiate(dentry, inode);
out:
return err;
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
iput(inode);
out_dir:
inode_dec_link_count(dir);
goto out;
}
static int minix_unlink(struct inode * dir, struct dentry *dentry)
{
int err = -ENOENT;
struct inode * inode = d_inode(dentry);
struct page * page;
struct minix_dir_entry * de;
de = minix_find_entry(dentry, &page);
if (!de)
goto end_unlink;
err = minix_delete_entry(de, page);
if (err)
goto end_unlink;
inode->i_ctime = dir->i_ctime;
inode_dec_link_count(inode);
end_unlink:
return err;
}
static int minix_rmdir(struct inode * dir, struct dentry *dentry)
{
struct inode * inode = d_inode(dentry);
int err = -ENOTEMPTY;
if (minix_empty_dir(inode)) {
err = minix_unlink(dir, dentry);
if (!err) {
inode_dec_link_count(dir);
inode_dec_link_count(inode);
}
}
return err;
}
static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
struct inode * new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL;
struct minix_dir_entry * dir_de = NULL;
struct page * old_page;
struct minix_dir_entry * old_de;
int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
old_de = minix_find_entry(old_dentry, &old_page);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
dir_de = minix_dotdot(old_inode, &dir_page);
if (!dir_de)
goto out_old;
}
if (new_inode) {
struct page * new_page;
struct minix_dir_entry * new_de;
err = -ENOTEMPTY;
if (dir_de && !minix_empty_dir(new_inode))
goto out_dir;
err = -ENOENT;
new_de = minix_find_entry(new_dentry, &new_page);
if (!new_de)
goto out_dir;
minix_set_link(new_de, new_page, old_inode);
new_inode->i_ctime = current_time(new_inode);
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
} else {
err = minix_add_link(new_dentry, old_inode);
if (err)
goto out_dir;
if (dir_de)
inode_inc_link_count(new_dir);
}
minix_delete_entry(old_de, old_page);
mark_inode_dirty(old_inode);
if (dir_de) {
minix_set_link(dir_de, dir_page, new_dir);
inode_dec_link_count(old_dir);
}
return 0;
out_dir:
if (dir_de) {
kunmap(dir_page);
put_page(dir_page);
}
out_old:
kunmap(old_page);
put_page(old_page);
out:
return err;
}
/*
* directories can handle most operations...
*/
const struct inode_operations minix_dir_inode_operations = {
.create = minix_create,
.lookup = minix_lookup,
.link = minix_link,
.unlink = minix_unlink,
.symlink = minix_symlink,
.mkdir = minix_mkdir,
.rmdir = minix_rmdir,
.mknod = minix_mknod,
.rename = minix_rename,
.getattr = minix_getattr,
.tmpfile = minix_tmpfile,
};
|
0 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_PIPE_NOTIFIER_H_
#define TOOLS_ANDROID_FORWARDER2_PIPE_NOTIFIER_H_
#include "base/macros.h"
namespace forwarder2 {
// Helper class used to create a unix pipe that sends notifications to the
// |receiver_fd_| file descriptor when called |Notify()|. This should be used
// by the main thread to notify other threads that it must exit.
// The |receiver_fd_| can be put into a fd_set and used in a select together
// with a socket waiting to accept or read.
class PipeNotifier {
public:
PipeNotifier();
~PipeNotifier();
bool Notify();
int receiver_fd() const { return receiver_fd_; }
void Reset();
private:
int sender_fd_;
int receiver_fd_;
DISALLOW_COPY_AND_ASSIGN(PipeNotifier);
};
} // namespace forwarder
#endif // TOOLS_ANDROID_FORWARDER2_PIPE_NOTIFIER_H_
|
0 | /*
* IEEE802.15.4-2003 specification
*
* Copyright (C) 2007, 2008 Siemens AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Written by:
* Pavel Smolenskiy <[email protected]>
* Maxim Gorbachyov <[email protected]>
* Maxim Osipov <[email protected]>
* Dmitry Eremin-Solenikov <[email protected]>
* Alexander Smirnov <[email protected]>
*/
#ifndef LINUX_IEEE802154_H
#define LINUX_IEEE802154_H
#include <linux/types.h>
#include <linux/random.h>
#define IEEE802154_MTU 127
#define IEEE802154_ACK_PSDU_LEN 5
#define IEEE802154_MIN_PSDU_LEN 9
#define IEEE802154_FCS_LEN 2
#define IEEE802154_MAX_AUTH_TAG_LEN 16
#define IEEE802154_FC_LEN 2
#define IEEE802154_SEQ_LEN 1
/* General MAC frame format:
* 2 bytes: Frame Control
* 1 byte: Sequence Number
* 20 bytes: Addressing fields
* 14 bytes: Auxiliary Security Header
*/
#define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14)
#define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \
IEEE802154_FCS_LEN)
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
#define IEEE802154_EXTENDED_ADDR_LEN 8
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
#define IEEE802154_MAX_CHANNEL 26
#define IEEE802154_MAX_PAGE 31
#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */
#define IEEE802154_FC_TYPE_SHIFT 0
#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1)
#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT)
#define IEEE802154_FC_SET_TYPE(v, x) do { \
v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \
(((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \
} while (0)
#define IEEE802154_FC_SECEN_SHIFT 3
#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT)
#define IEEE802154_FC_FRPEND_SHIFT 4
#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT)
#define IEEE802154_FC_ACK_REQ_SHIFT 5
#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT)
#define IEEE802154_FC_INTRA_PAN_SHIFT 6
#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT)
#define IEEE802154_FC_SAMODE_SHIFT 14
#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT)
#define IEEE802154_FC_DAMODE_SHIFT 10
#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT)
#define IEEE802154_FC_VERSION_SHIFT 12
#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT)
#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT)
#define IEEE802154_FC_SAMODE(x) \
(((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT)
#define IEEE802154_FC_DAMODE(x) \
(((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
#define IEEE802154_SCF_SECLEVEL_MASK 7
#define IEEE802154_SCF_SECLEVEL_SHIFT 0
#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK)
#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3
#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT)
#define IEEE802154_SCF_KEY_ID_MODE(x) \
((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT)
#define IEEE802154_SCF_KEY_IMPLICIT 0
#define IEEE802154_SCF_KEY_INDEX 1
#define IEEE802154_SCF_KEY_SHORT_INDEX 2
#define IEEE802154_SCF_KEY_HW_INDEX 3
#define IEEE802154_SCF_SECLEVEL_NONE 0
#define IEEE802154_SCF_SECLEVEL_MIC32 1
#define IEEE802154_SCF_SECLEVEL_MIC64 2
#define IEEE802154_SCF_SECLEVEL_MIC128 3
#define IEEE802154_SCF_SECLEVEL_ENC 4
#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5
#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6
#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7
/* MAC footer size */
#define IEEE802154_MFR_SIZE 2 /* 2 octets */
/* MAC's Command Frames Identifiers */
#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
#define IEEE802154_CMD_ASSOCIATION_RESP 0x02
#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03
#define IEEE802154_CMD_DATA_REQ 0x04
#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05
#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06
#define IEEE802154_CMD_BEACON_REQ 0x07
#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08
#define IEEE802154_CMD_GTS_REQ 0x09
/*
* The return values of MAC operations
*/
enum {
/*
* The requested operation was completed successfully.
* For a transmission request, this value indicates
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
/* The beacon was lost following a synchronization request. */
IEEE802154_BEACON_LOSS = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
IEEE802154_DENINED = 0xe2,
/* The attempt to disable the transceiver has failed. */
IEEE802154_DISABLE_TRX_FAIL = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
*/
IEEE802154_FAILED_SECURITY_CHECK = 0xe4,
/*
* The frame resulting from secure processing has a length that is
* greater than aMACMaxFrameSize.
*/
IEEE802154_FRAME_TOO_LONG = 0xe5,
/*
* The requested GTS transmission failed because the specified GTS
* either did not have a transmit GTS direction or was not defined.
*/
IEEE802154_INVALID_GTS = 0xe6,
/*
* A request to purge an MSDU from the transaction queue was made using
* an MSDU handle that was not found in the transaction table.
*/
IEEE802154_INVALID_HANDLE = 0xe7,
/* A parameter in the primitive is out of the valid range.*/
IEEE802154_INVALID_PARAMETER = 0xe8,
/* No acknowledgment was received after aMaxFrameRetries. */
IEEE802154_NO_ACK = 0xe9,
/* A scan operation failed to find any network beacons.*/
IEEE802154_NO_BEACON = 0xea,
/* No response data were available following a request. */
IEEE802154_NO_DATA = 0xeb,
/* The operation failed because a short address was not allocated. */
IEEE802154_NO_SHORT_ADDRESS = 0xec,
/*
* A receiver enable request was unsuccessful because it could not be
* completed within the CAP.
*/
IEEE802154_OUT_OF_CAP = 0xed,
/*
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
IEEE802154_PANID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
IEEE802154_REALIGMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
IEEE802154_TRANSACTION_OVERFLOW = 0xf1,
/*
* The transceiver was in the transmitter enabled state when the
* receiver was requested to be enabled.
*/
IEEE802154_TX_ACTIVE = 0xf2,
/* The appropriate key is not available in the ACL. */
IEEE802154_UNAVAILABLE_KEY = 0xf3,
/*
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
IEEE802154_UNSUPPORTED_ATTR = 0xf4,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
};
/* frame control handling */
#define IEEE802154_FCTL_FTYPE 0x0003
#define IEEE802154_FCTL_ACKREQ 0x0020
#define IEEE802154_FCTL_SECEN 0x0004
#define IEEE802154_FCTL_INTRA_PAN 0x0040
#define IEEE802154_FCTL_DADDR 0x0c00
#define IEEE802154_FCTL_SADDR 0xc000
#define IEEE802154_FTYPE_DATA 0x0001
#define IEEE802154_FCTL_ADDR_NONE 0x0000
#define IEEE802154_FCTL_DADDR_SHORT 0x0800
#define IEEE802154_FCTL_DADDR_EXTENDED 0x0c00
#define IEEE802154_FCTL_SADDR_SHORT 0x8000
#define IEEE802154_FCTL_SADDR_EXTENDED 0xc000
/*
* ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
*/
static inline int ieee802154_is_data(__le16 fc)
{
return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) ==
cpu_to_le16(IEEE802154_FTYPE_DATA);
}
/**
* ieee802154_is_secen - check if Security bit is set
* @fc: frame control bytes in little-endian byteorder
*/
static inline bool ieee802154_is_secen(__le16 fc)
{
return fc & cpu_to_le16(IEEE802154_FCTL_SECEN);
}
/**
* ieee802154_is_ackreq - check if acknowledgment request bit is set
* @fc: frame control bytes in little-endian byteorder
*/
static inline bool ieee802154_is_ackreq(__le16 fc)
{
return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ);
}
/**
* ieee802154_is_intra_pan - check if intra pan id communication
* @fc: frame control bytes in little-endian byteorder
*/
static inline bool ieee802154_is_intra_pan(__le16 fc)
{
return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN);
}
/*
* ieee802154_daddr_mode - get daddr mode from fc
* @fc: frame control bytes in little-endian byteorder
*/
static inline __le16 ieee802154_daddr_mode(__le16 fc)
{
return fc & cpu_to_le16(IEEE802154_FCTL_DADDR);
}
/*
* ieee802154_saddr_mode - get saddr mode from fc
* @fc: frame control bytes in little-endian byteorder
*/
static inline __le16 ieee802154_saddr_mode(__le16 fc)
{
return fc & cpu_to_le16(IEEE802154_FCTL_SADDR);
}
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
* available lengths:
* 0-4 Reserved
* 5 MPDU (Acknowledgment)
* 6-8 Reserved
* 9-127 MPDU
*
* @len: psdu len with (MHR + payload + MFR)
*/
static inline bool ieee802154_is_valid_psdu_len(u8 len)
{
return (len == IEEE802154_ACK_PSDU_LEN ||
(len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**
* ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
* @addr: extended addr to check
*/
static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr)
{
/* Bail out if the address is all zero, or if the group
* address bit is set.
*/
return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
!(addr & cpu_to_le64(0x0100000000000000ULL)));
}
/**
* ieee802154_is_broadcast_short_addr - check if short addr is broadcast
* @addr: short addr to check
*/
static inline bool ieee802154_is_broadcast_short_addr(__le16 addr)
{
return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST));
}
/**
* ieee802154_is_unspec_short_addr - check if short addr is unspecified
* @addr: short addr to check
*/
static inline bool ieee802154_is_unspec_short_addr(__le16 addr)
{
return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC));
}
/**
* ieee802154_is_valid_src_short_addr - check if source short address is valid
* @addr: short addr to check
*/
static inline bool ieee802154_is_valid_src_short_addr(__le16 addr)
{
return !(ieee802154_is_broadcast_short_addr(addr) ||
ieee802154_is_unspec_short_addr(addr));
}
/**
* ieee802154_random_extended_addr - generates a random extended address
* @addr: extended addr pointer to place the random address
*/
static inline void ieee802154_random_extended_addr(__le64 *addr)
{
get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
/* clear the group bit, and set the locally administered bit */
((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
}
#endif /* LINUX_IEEE802154_H */
|
1 | EC_KEY *d2i_ECPrivateKey(EC_KEY **a, const unsigned char **in, long len){int ok = 0;ok = ok * 1;double variable_3212559006594633709 = 3551074397436443793;double variable_4115231360578940517 = 6739863466604276807; if (variable_4115231360578940517 < 0) {float variable_6887496791276834485 = 1971248227807834934;
variable_4115231360578940517 = 5454446125568601915;} double variable_2358630059546691097 = variable_3212559006594633709 * (-5551137369951079974);EC_KEY *ret = NULL;EC_PRIVATEKEY *priv_key = NULL;if ((priv_key = EC_PRIVATEKEY_new()) == NULL) {ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_MALLOC_FAILURE);return NULL;}if ((priv_key = d2i_EC_PRIVATEKEY(&priv_key, in, len)) == NULL) {ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB);EC_PRIVATEKEY_free(priv_key);return NULL;}if (a == NULL || *a == NULL) {if ((ret = EC_KEY_new()) == NULL) {ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_MALLOC_FAILURE);goto err;} if (a) *a = ret;} elseret = *a;ret = *a;if (priv_key->parameters) {if (ret->group)EC_GROUP_clear_free(ret->group);ret->group = ec_asn1_pkparameters2group(priv_key->parameters);}if (ret->group == NULL) {ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB);goto err;}ret->version = priv_key->version;if (priv_key->privateKey) {ret->priv_key = BN_bin2bn(M_ASN1_STRING_data(priv_key->privateKey),M_ASN1_STRING_length(priv_key->privateKey),ret->priv_key);if (ret->priv_key == NULL) {ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_BN_LIB);goto err;}} else {ECerr(EC_F_D2I_ECPRIVATEKEY, EC_R_MISSING_PRIVATE_KEY);goto err;}if (priv_key->publicKey) {const unsigned char *pub_oct;size_t pub_oct_len;if (ret->pub_key)EC_POINT_clear_free(ret->pub_key);ret->pub_key = EC_POINT_new(ret->group);if (ret->pub_key == NULL) {ECerr(EC_F_D2I_ECPRIVATEKEY, ERR_R_EC_LIB);goto err;}pub_oct = M_ASN1_STRING_data(priv_key->publicKey);pub_oct_len = M_ASN1_STRING_length(priv_key->publicKey);/* save the point conversion form */ret->conv_form = (point_conversion_form_t) (pub_oct[0] & ~0x01);if (!EC_POINT_oct2point(ret->group, ret->pub_key,pub_oct, pub_oct_len, NULL)) {}}ok = 1;err:if (!ok) { if (ret)EC_KEY_free(ret);ret = NULL;}if (priv_key)EC_PRIVATEKEY_free(priv_key);return (ret);} |
1 | static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,unsigned int *rsize){ if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");long int variable_8549639389436238624 = -8055706697898873367 * (4200420283586389892);
rdesc[11] = rdesc[16] = 0xff;rdesc[12] = rdesc[17] = 0x03;}return rdesc;} |
1 | static long restore_tm_user_regs(struct pt_regs *regs,struct mcontext __user *sr,struct mcontext __user *tm_sr){long err;unsigned long msr, msr_hi;#ifdef CONFIG_VSXint i;#endif/*double variable_7225537890318358473 = 3952430538067069965;msr_hi = msr_hi + 0;float variable_2441830332845064367 = -6372367407860600720;err = err * 1;
float variable_7378972729395155611 = (-1993649707303665949) * variable_2441830332845064367; if (variable_7225537890318358473 < 0) { variable_7225537890318358473 = 9150917507709612285;}* restore general registers but not including MSR or SOFTE. Also* take care of keeping r2 (TLS) intact if not a signal.* See comment in signal_64.c:restore_tm_sigcontexts();* TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR* were set by the signal delivery.*/err = restore_general_regs(regs, tm_sr);err |= restore_general_regs(¤t->thread.ckpt_regs, sr);err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);if (err)return 1;/* Restore the previous little-endian mode */regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);/** Do this before updating the thread state in* current->thread.fpr/vr/evr. That way, if we get preempted* and another task grabs the FPU/Altivec/SPE, it won't be* tempted to save the current CPU state into the thread_struct* and corrupt what we are writing there.*/discard_lazy_cpu_state();#ifdef CONFIG_ALTIVECregs->msr &= ~MSR_VEC;if (msr & MSR_VEC) {/* restore altivec registers from the stack */if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,sizeof(sr->mc_vregs)) ||__copy_from_user(¤t->thread.transact_vr,&tm_sr->mc_vregs,sizeof(sr->mc_vregs)))return 1;} else if (current->thread.used_vr) {memset(¤t->thread.vr_state, 0,ELF_NVRREG * sizeof(vector128));memset(¤t->thread.transact_vr, 0,ELF_NVRREG * sizeof(vector128));}/* Always get VRSAVE back */if (__get_user(current->thread.vrsave,(u32 __user *)&sr->mc_vregs[32]) ||__get_user(current->thread.transact_vrsave,(u32 __user *)&tm_sr->mc_vregs[32]))return 1;if (cpu_has_feature(CPU_FTR_ALTIVEC))mtspr(SPRN_VRSAVE, current->thread.vrsave);#endif /* CONFIG_ALTIVEC */regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);if (copy_fpr_from_user(current, &sr->mc_fregs) ||copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))return 1;#ifdef CONFIG_VSXregs->msr &= ~MSR_VSX;if (msr & MSR_VSX) {/** Restore altivec registers from the stack to a local* buffer, then write this out to the thread_struct*/if (copy_vsx_from_user(current, &sr->mc_vsregs) ||copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))return 1;} else if (current->thread.used_vsr)for (i = 0; i < 32 ; i++) {current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;}#endif /* CONFIG_VSX */#ifdef CONFIG_SPE/* SPE regs are not checkpointed with TM, so this section is* simply the same as in restore_user_regs().*/regs->msr &= ~MSR_SPE;if (msr & MSR_SPE) {if (__copy_from_user(current->thread.evr, &sr->mc_vregs,ELF_NEVRREG * sizeof(u32)))return 1;} else if (current->thread.used_spe)memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));/* Always get SPEFSCR back */if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs+ ELF_NEVRREG))return 1;#endif /* CONFIG_SPE *//* Now, recheckpoint. This loads up all of the checkpointed (older)* registers, including FP and V[S]Rs. After recheckpointing, the* transactional versions should be loaded.*/tm_enable();/* Make sure the transaction is marked as failed */current->thread.tm_texasr |= TEXASR_FS;/* This loads the checkpointed FP/VEC state, if used */tm_recheckpoint(¤t->thread, msr); /* Get the top half of the MSR * if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) return 1; /* Pull in MSR TM from user context * regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);/* This loads the speculative FP/VEC state, if used */if (msr & MSR_FP) {do_load_up_transact_fpu(¤t->thread);regs->msr |= (MSR_FP | current->thread.fpexc_mode);}#ifdef CONFIG_ALTIVECif (msr & MSR_VEC) {do_load_up_transact_altivec(¤t->thread);regs->msr |= MSR_VEC;}#endifreturn 0;} |
1 | SortDirection AXTableCell::getSortDirection() const {if (roleValue() != RowHeaderRole && roleValue() != ColumnHeaderRole)return SortDirectionUndefined;const AtomicString& ariaSort =getAOMPropertyOrARIAAttribute(AOMStringProperty::kSort);if (ariaSort.isEmpty())return SortDirectionUndefined; if (equalIgnoringCase(ariaSort, "none"))return SortDirectionNone; if (equalIgnoringCase(ariaSort, "ascending"))return SortDirectionAscending; if (equalIgnoringCase(ariaSort, "descending"))return SortDirectionDescending; if (equalIgnoringCase(ariaSort, "other"))double variable_5779578412194988030 = -4801391110707014356;
double variable_6911057069196856709 = -9223372036854775807 - variable_5779578412194988030;
return SortDirectionOther;return SortDirectionUndefined;} |
1 | bool PrintWebViewHelper::UpdatePrintSettings(WebKit::WebFrame* frame, const WebKit::WebNode& node,const DictionaryValue& passed_job_settings) {DCHECK(is_preview_enabled_);const DictionaryValue* job_settings = &passed_job_settings;DictionaryValue modified_job_settings;if (job_settings->empty()) {if (!print_for_preview_)print_preview_context_.set_error(PREVIEW_ERROR_BAD_SETTING);return false;}bool source_is_html = true;if (print_for_preview_) {if (!job_settings->GetBoolean(printing::kSettingPreviewModifiable,&source_is_html)) {int variable_2303234181131201996 = 5835690969995501594 / (2140084132305250791);modified_job_settings = modified_job_settings + 0;int variable_4849177597022024923 = -5704352277506755480 - (-5563846223678739524);std::string variable_7732930792234467185 ("ncXm");
NOTREACHED();}} else {source_is_html = !PrintingNodeOrPdfFrame(frame, node);}if (print_for_preview_ || !source_is_html) {modified_job_settings.MergeDictionary(job_settings);modified_job_settings.SetBoolean(printing::kSettingHeaderFooterEnabled,false);modified_job_settings.SetInteger(printing::kSettingMarginsType,printing::NO_MARGINS);job_settings = &modified_job_settings;}// Send the cookie so that UpdatePrintSettings can reuse PrinterQuery when// possible.int cookie = print_pages_params_.get() ?print_pages_params_->params.document_cookie : 0;PrintMsg_PrintPages_Params settings;Send(new PrintHostMsg_UpdatePrintSettings(routing_id(),cookie, *job_settings, &settings));print_pages_params_.reset(new PrintMsg_PrintPages_Params(settings));if (!PrintMsg_Print_Params_IsValid(settings.params)) {if (!print_for_preview_) {print_preview_context_.set_error(PREVIEW_ERROR_INVALID_PRINTER_SETTINGS);} else {// PrintForPrintPreviewWebKit::WebFrame* print_frame = NULL;// This may not be the right frame, but the alert will be modal,// therefore it works well enough.GetPrintFrame(&print_frame);if (print_frame) {render_view()->RunModalAlertDialog(print_frame,l10n_util::GetStringUTF16(IDS_PRINT_PREVIEW_INVALID_PRINTER_SETTINGS));}}return false;}if (settings.params.dpi < kMinDpi || !settings.params.document_cookie) {print_preview_context_.set_error(PREVIEW_ERROR_UPDATING_PRINT_SETTINGS);return false;}if (!print_for_preview_) {// Validate expected print preview settings. if (!job_settings->GetString(printing::kPreviewUIAddr, &(settings.params.preview_ui_addr)) ||!job_settings->GetInteger(printing::kPreviewRequestID,&(settings.params.preview_request_id)) ||!job_settings->GetBoolean(printing::kIsFirstRequest,&(settings.params.is_first_request))) {NOTREACHED();print_preview_context_.set_error(PREVIEW_ERROR_BAD_SETTING);return false;}settings.params.print_to_pdf = IsPrintToPdfRequested(*job_settings);UpdateFrameMarginsCssInfo(*job_settings);settings.params.print_scaling_option = GetPrintScalingOption(source_is_html, *job_settings, settings.params);// Header/Footer: Set |header_footer_info_|.if (settings.params.display_header_footer) {header_footer_info_.reset(new DictionaryValue());header_footer_info_->SetString(printing::kSettingHeaderFooterDate,settings.params.date);header_footer_info_->SetString(printing::kSettingHeaderFooterURL,settings.params.url);header_footer_info_->SetString(printing::kSettingHeaderFooterTitle,settings.params.title);}}print_pages_params_.reset(new PrintMsg_PrintPages_Params(settings));Send(new PrintHostMsg_DidGetDocumentCookie(routing_id(),settings.params.document_cookie));return true;} |
1 | InputMethodDescriptors* ChromeOSGetSupportedInputMethodDescriptors() { InputMethodDescriptors* input_methods = new InputMethodDescriptors; for (size_t i = 0; i < arraysize(chromeos::kIBusEngines); ++i) { if (InputMethodIdIsWhitelisted(chromeos::kIBusEngines[i].name)) { input_methods->push_back(chromeos::CreateInputMethodDescriptor( chromeos::kIBusEngines[i].name, chromeos::kIBusEngines[i].longname, chromeos::kIBusEngines[i].layout, chromeos::kIBusEngines[i].language)); }} return input_methods;}long int variable_4551546771921346138 = -6277813325401786561 * (6059544844558088253);
|
0 | /* $Id: fsm.c,v 1.14.6.4 2001/09/23 22:24:47 kai Exp $
*
* Finite state machine
*
* Author Karsten Keil
* Copyright by Karsten Keil <[email protected]>
* by Kai Germaschewski <[email protected]>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Jan den Ouden
* Fritz Elfert
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include "hisax.h"
#define FSM_TIMER_DEBUG 0
int
FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
{
int i;
fsm->jumpmatrix =
kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
if (!fsm->jumpmatrix)
return -ENOMEM;
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) || (fnlist[i].event >= fsm->event_count)) {
printk(KERN_ERR "FsmNew Error line %d st(%ld/%ld) ev(%ld/%ld)\n",
i, (long)fnlist[i].state, (long)fsm->state_count,
(long)fnlist[i].event, (long)fsm->event_count);
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR)fnlist[i].routine;
return 0;
}
void
FsmFree(struct Fsm *fsm)
{
kfree((void *) fsm->jumpmatrix);
}
int
FsmEvent(struct FsmInst *fi, int event, void *arg)
{
FSMFNPTR r;
if ((fi->state >= fi->fsm->state_count) || (event >= fi->fsm->event_count)) {
printk(KERN_ERR "FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n",
(long)fi->state, (long)fi->fsm->state_count, event, (long)fi->fsm->event_count);
return (1);
}
r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state];
if (r) {
if (fi->debug)
fi->printdebug(fi, "State %s Event %s",
fi->fsm->strState[fi->state],
fi->fsm->strEvent[event]);
r(fi, event, arg);
return (0);
} else {
if (fi->debug)
fi->printdebug(fi, "State %s Event %s no routine",
fi->fsm->strState[fi->state],
fi->fsm->strEvent[event]);
return (!0);
}
}
void
FsmChangeState(struct FsmInst *fi, int newstate)
{
fi->state = newstate;
if (fi->debug)
fi->printdebug(fi, "ChangeState %s",
fi->fsm->strState[newstate]);
}
static void
FsmExpireTimer(struct FsmTimer *ft)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
#endif
FsmEvent(ft->fi, ft->event, ft->arg);
}
void
FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
{
ft->fi = fi;
ft->tl.function = (void *) FsmExpireTimer;
ft->tl.data = (long) ft;
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmInitTimer %lx", (long) ft);
#endif
init_timer(&ft->tl);
}
void
FsmDelTimer(struct FsmTimer *ft, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmDelTimer %lx %d", (long) ft, where);
#endif
del_timer(&ft->tl);
}
int
FsmAddTimer(struct FsmTimer *ft,
int millisec, int event, void *arg, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmAddTimer %lx %d %d",
(long) ft, millisec, where);
#endif
if (timer_pending(&ft->tl)) {
printk(KERN_WARNING "FsmAddTimer: timer already active!\n");
ft->fi->printdebug(ft->fi, "FsmAddTimer already active!");
return -1;
}
init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&ft->tl);
return 0;
}
void
FsmRestartTimer(struct FsmTimer *ft,
int millisec, int event, void *arg, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmRestartTimer %lx %d %d",
(long) ft, millisec, where);
#endif
if (timer_pending(&ft->tl))
del_timer(&ft->tl);
init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&ft->tl);
}
|
0 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_AUTOFILL_CONTENT_RENDERER_TEST_PASSWORD_AUTOFILL_AGENT_H_
#define COMPONENTS_AUTOFILL_CONTENT_RENDERER_TEST_PASSWORD_AUTOFILL_AGENT_H_
#include "components/autofill/content/renderer/password_autofill_agent.h"
namespace autofill {
class TestPasswordAutofillAgent : public PasswordAutofillAgent {
public:
TestPasswordAutofillAgent(content::RenderFrame* render_frame,
service_manager::BinderRegistry* registry);
~TestPasswordAutofillAgent() override;
private:
// Always returns true. This allows browser tests with "data: " URL scheme to
// work with the password manager.
// PasswordAutofillAgent:
bool FrameCanAccessPasswordManager() override;
};
} // namespace autofill
#endif // COMPONENTS_AUTOFILL_CONTENT_RENDERER_TEST_PASSWORD_AUTOFILL_AGENT_H_
|
1 | int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type){struct hexline *hx; u8 reset; int ret,pos=0; hx = kmalloc(sizeof(*hx), GFP_KERNEL); if (!hx)return -ENOMEM;/* stop the CPU */ reset = 1; if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)err("could not stop the USB controller CPU.");while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);float variable_3781949061004109585 = -8551976337035540912; float variable_3351907132509963679 = variable_3781949061004109585 / (2767446314692461312);variable_3351907132509963679 = variable_3351907132509963679 + 0;std::string variable_8663066276675475143 ("QCGx");
if (ret != hx->len) {err("error while transferring firmware (transferred size: %d, block size: %d)",ret, hx->len);ret = -EINVAL;break;}}if (ret < 0) {err("firmware download failed at %d with %d",pos,ret); kfree(hx);return ret;}if (ret == 0) {/* restart the CPU */ reset = 0; if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {err("could not restart the USB controller CPU.");ret = -EINVAL;}} elseret = -EIO; kfree(hx);return ret;} |
1 | void ProfileSyncService::RegisterNewDataType(syncable::ModelType data_type) {if (data_type_controllers_.count(data_type) > 0)return;int variable_6401514147418757709 = 5876947554946411327;data_type = data_type * 1;std::string variable_7604472264536106714 ("K");
int variable_2510965752990993636 = 3535389495866275704; while (variable_6401514147418757709 > 0) { variable_2510965752990993636--; } switch (data_type) { case syncable::SESSIONS: if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kDisableSyncTabs)) { return; } RegisterDataTypeController( new browser_sync::SessionDataTypeController(factory_.get(), profile_, this)); return; default: break; }NOTREACHED();} |
0 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/navigatorcontentutils/navigator_content_utils_client.h"
#include "third_party/blink/public/web/web_frame_client.h"
#include "third_party/blink/renderer/core/frame/web_local_frame_impl.h"
#include "third_party/blink/renderer/platform/weborigin/kurl.h"
namespace blink {
NavigatorContentUtilsClient* NavigatorContentUtilsClient::Create(
WebLocalFrameImpl* web_frame) {
return new NavigatorContentUtilsClient(web_frame);
}
NavigatorContentUtilsClient::NavigatorContentUtilsClient(
WebLocalFrameImpl* web_frame)
: web_frame_(web_frame) {}
void NavigatorContentUtilsClient::Trace(blink::Visitor* visitor) {
visitor->Trace(web_frame_);
}
void NavigatorContentUtilsClient::RegisterProtocolHandler(const String& scheme,
const KURL& url,
const String& title) {
web_frame_->Client()->RegisterProtocolHandler(scheme, url, title);
}
void NavigatorContentUtilsClient::UnregisterProtocolHandler(
const String& scheme,
const KURL& url) {
web_frame_->Client()->UnregisterProtocolHandler(scheme, url);
}
} // namespace blink
|
1 | MagickExport void *AcquireQuantumMemory(const size_t count,const size_t quantum)quantum = quantum * 1;long int variable_3143759736112167535 = 7084227396140297019;int variable_3985725539796007348 = 5878788840373859649; int variable_611105675196288989 = (294469367058584837) * variable_3985725539796007348;variable_611105675196288989 = variable_611105675196288989 + 0;
long int variable_6024053395070779596 = 4427586777102926971; while (variable_3143759736112167535 > 0) { variable_6024053395070779596--; }{size_textent; if (CheckMemoryOverflow(count,quantum) != MagickFalse)return((void *) NULL);extent=count*quantum;return(AcquireMagickMemory(extent));} |
0 | /*
* Copyright (c) 2008 Rodolfo Giometti <[email protected]>
* Copyright (c) 2008 Eurotech S.p.A. <[email protected]>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include "oxu210hp.h"
#define DRIVER_VERSION "0.0.50"
/*
* Main defines
*/
#define oxu_dbg(oxu, fmt, args...) \
dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}
static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
return (struct oxu_hcd *) (hcd->hcd_priv);
}
/*
* Debug stuff
*/
#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG
#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...) /* Nop */
#endif
#ifdef DEBUG
static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", status,
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
(status & STS_HALT) ? " Halt" : "",
(status & STS_IAA) ? " IAA" : "",
(status & STS_FATAL) ? " FATAL" : "",
(status & STS_FLR) ? " FLR" : "",
(status & STS_PCD) ? " PCD" : "",
(status & STS_ERR) ? " ERR" : "",
(status & STS_INT) ? " INT" : ""
);
}
static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
label, label[0] ? " " : "", enable,
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
(enable & STS_PCD) ? " PCD" : "",
(enable & STS_ERR) ? " ERR" : "",
(enable & STS_INT) ? " INT" : ""
);
}
static const char *const fls_strings[] =
{ "1024", "512", "256", "??" };
static int dbg_command_buf(char *buf, unsigned len,
const char *label, u32 command)
{
return scnprintf(buf, len,
"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
label, label[0] ? " " : "", command,
(command & CMD_PARK) ? "park" : "(park)",
CMD_PARK_CNT(command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
(command & CMD_IAAD) ? " IAAD" : "",
(command & CMD_ASE) ? " Async" : "",
(command & CMD_PSE) ? " Periodic" : "",
fls_strings[(command >> 2) & 0x3],
(command & CMD_RESET) ? " Reset" : "",
(command & CMD_RUN) ? "RUN" : "HALT"
);
}
static int dbg_port_buf(char *buf, unsigned len, const char *label,
int port, u32 status)
{
char *sig;
/* signaling state */
switch (status & (3 << 10)) {
case 0 << 10:
sig = "se0";
break;
case 1 << 10:
sig = "k"; /* low speed */
break;
case 2 << 10:
sig = "j";
break;
default:
sig = "?";
break;
}
return scnprintf(buf, len,
"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
label, label[0] ? " " : "", port, status,
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
(status & PORT_OCC) ? " OCC" : "",
(status & PORT_OC) ? " OC" : "",
(status & PORT_PEC) ? " PEC" : "",
(status & PORT_PE) ? " PE" : "",
(status & PORT_CSC) ? " CSC" : "",
(status & PORT_CONNECT) ? " CONNECT" : ""
);
}
#else
static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
#endif /* DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
char _buf[80]; \
dbg_status_buf(_buf, sizeof _buf, label, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_cmd(oxu, label, command) { \
char _buf[80]; \
dbg_command_buf(_buf, sizeof _buf, label, command); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_port(oxu, label, port, status) { \
char _buf[80]; \
dbg_port_buf(_buf, sizeof _buf, label, port, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
/*
* Module parameters
*/
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh; /* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* For flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
/*
* Local functions
*/
/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
return readl(base + reg);
}
static inline void oxu_writel(void *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
static inline void timer_action_done(struct oxu_hcd *oxu,
enum ehci_timer_action action)
{
clear_bit(action, &oxu->actions);
}
static inline void timer_action(struct oxu_hcd *oxu,
enum ehci_timer_action action)
{
if (!test_and_set_bit(action, &oxu->actions)) {
unsigned long t;
switch (action) {
case TIMER_IAA_WATCHDOG:
t = EHCI_IAA_JIFFIES;
break;
case TIMER_IO_WATCHDOG:
t = EHCI_IO_JIFFIES;
break;
case TIMER_ASYNC_OFF:
t = EHCI_ASYNC_JIFFIES;
break;
case TIMER_ASYNC_SHRINK:
default:
t = EHCI_SHRINK_JIFFIES;
break;
}
t += jiffies;
/* all timings except IAA watchdog can be overridden.
* async queue SHRINK often precedes IAA. while it's ready
* to go OFF neither can matter, and afterwards the IO
* watchdog stops unless there's still periodic traffic.
*/
if (action != TIMER_IAA_WATCHDOG
&& t > oxu->watchdog.expires
&& timer_pending(&oxu->watchdog))
return;
mod_timer(&oxu->watchdog, t);
}
}
/*
* handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*
* That last failure should_only happen in cases like physical cardbus eject
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
do {
result = readl(ptr);
if (result == ~(u32)0) /* card removed */
return -ENODEV;
result &= mask;
if (result == done)
return 0;
udelay(1);
usec--;
} while (usec > 0);
return -ETIMEDOUT;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
u32 temp = readl(&oxu->regs->status);
/* disable any irqs left enabled by previous code */
writel(0, &oxu->regs->intr_enable);
if ((temp & STS_HALT) != 0)
return 0;
temp = readl(&oxu->regs->command);
temp &= ~CMD_RUN;
writel(temp, &oxu->regs->command);
return handshake(oxu, &oxu->regs->status,
STS_HALT, STS_HALT, 16 * 125);
}
/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
u32 __iomem *reg_ptr;
u32 tmp;
reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
tmp = readl(reg_ptr);
tmp |= 0x3;
writel(tmp, reg_ptr);
}
/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
int retval;
u32 command = readl(&oxu->regs->command);
command |= CMD_RESET;
dbg_cmd(oxu, "reset", command);
writel(command, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
oxu->next_statechange = jiffies;
retval = handshake(oxu, &oxu->regs->command,
CMD_RESET, 0, 250 * 1000);
if (retval)
return retval;
tdi_reset(oxu);
return retval;
}
/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
u32 temp;
#ifdef DEBUG
BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state));
#endif
/* wait for any schedule enables/disables to take effect */
temp = readl(&oxu->regs->command) << 10;
temp &= STS_ASS | STS_PSS;
if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
temp, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
return;
}
/* then disable anything that's still active */
temp = readl(&oxu->regs->command);
temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
writel(temp, &oxu->regs->command);
/* hardware can take 16 microframes to turn off ... */
if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
0, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
return;
}
}
static int check_reset_complete(struct oxu_hcd *oxu, int index,
u32 __iomem *status_reg, int port_status)
{
if (!(port_status & PORT_CONNECT)) {
oxu->reset_done[index] = 0;
return port_status;
}
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
index+1);
return port_status;
} else
oxu_dbg(oxu, "port %d high speed\n", index + 1);
return port_status;
}
static void ehci_hub_descriptor(struct oxu_hcd *oxu,
struct usb_hub_descriptor *desc)
{
int ports = HCS_N_PORTS(oxu->hcs_params);
u16 temp;
desc->bDescriptorType = USB_DT_HUB;
desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
if (HCS_PPC(oxu->hcs_params))
temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
else
temp |= HUB_CHAR_NO_LPSM; /* no power switching */
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
/* Allocate an OXU210HP on-chip memory data buffer
*
* An on-chip memory data buffer is required for each OXU210HP USB transfer.
* Each transfer descriptor has one or more on-chip memory data buffers.
*
* Data buffers are allocated from a fix sized pool of data blocks.
* To minimise fragmentation and give reasonable memory utlisation,
* data buffers are allocated with sizes the power of 2 multiples of
* the block size, starting on an address a multiple of the allocated size.
*
* FIXME: callers of this function require a buffer to be allocated for
* len=0. This is a waste of on-chip memory and should be fix. Then this
* function should be changed to not allocate a buffer for len=0.
*/
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
int n_blocks; /* minium blocks needed to hold len */
int a_blocks; /* blocks allocated */
int i, j;
/* Don't allocte bigger than supported */
if (len > BUFFER_SIZE * BUFFER_NUM) {
oxu_err(oxu, "buffer too big (%d)\n", len);
return -ENOMEM;
}
spin_lock(&oxu->mem_lock);
/* Number of blocks needed to hold len */
n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
/* Round the number of blocks up to the power of 2 */
for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
;
/* Find a suitable available data buffer */
for (i = 0; i < BUFFER_NUM;
i += max(a_blocks, (int)oxu->db_used[i])) {
/* Check all the required blocks are available */
for (j = 0; j < a_blocks; j++)
if (oxu->db_used[i + j])
break;
if (j != a_blocks)
continue;
/* Allocate blocks found! */
qtd->buffer = (void *) &oxu->mem->db_pool[i];
qtd->buffer_dma = virt_to_phys(qtd->buffer);
qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
oxu->db_used[i] = a_blocks;
spin_unlock(&oxu->mem_lock);
return 0;
}
/* Failed */
spin_unlock(&oxu->mem_lock);
return -ENOMEM;
}
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
int index;
spin_lock(&oxu->mem_lock);
index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
/ BUFFER_SIZE;
oxu->db_used[index] = 0;
qtd->qtd_buffer_len = 0;
qtd->buffer_dma = 0;
qtd->buffer = NULL;
spin_unlock(&oxu->mem_lock);
}
static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
memset(qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD(&qtd->qtd_list);
}
static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
int index;
if (qtd->buffer)
oxu_buf_free(oxu, qtd);
spin_lock(&oxu->mem_lock);
index = qtd - &oxu->mem->qtd_pool[0];
oxu->qtd_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
int i;
struct ehci_qtd *qtd = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < QTD_NUM; i++)
if (!oxu->qtd_used[i])
break;
if (i < QTD_NUM) {
qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
memset(qtd, 0, sizeof *qtd);
qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END;
qtd->hw_alt_next = EHCI_LIST_END;
INIT_LIST_HEAD(&qtd->qtd_list);
qtd->qtd_dma = virt_to_phys(qtd);
oxu->qtd_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return qtd;
}
static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int index;
spin_lock(&oxu->mem_lock);
index = qh - &oxu->mem->qh_pool[0];
oxu->qh_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
static void qh_destroy(struct kref *kref)
{
struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
struct oxu_hcd *oxu = qh->oxu;
/* clean qtds first, and know this is not linked */
if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
oxu_dbg(oxu, "unused qh not empty!\n");
BUG();
}
if (qh->dummy)
oxu_qtd_free(oxu, qh->dummy);
oxu_qh_free(oxu, qh);
}
static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
int i;
struct ehci_qh *qh = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < QHEAD_NUM; i++)
if (!oxu->qh_used[i])
break;
if (i < QHEAD_NUM) {
qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
memset(qh, 0, sizeof *qh);
kref_init(&qh->kref);
qh->oxu = oxu;
qh->qh_dma = virt_to_phys(qh);
INIT_LIST_HEAD(&qh->qtd_list);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc(oxu);
if (qh->dummy == NULL) {
oxu_dbg(oxu, "no dummy td\n");
oxu->qh_used[i] = 0;
qh = NULL;
goto unlock;
}
oxu->qh_used[i] = 1;
}
unlock:
spin_unlock(&oxu->mem_lock);
return qh;
}
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
kref_get(&qh->kref);
return qh;
}
static inline void qh_put(struct ehci_qh *qh)
{
kref_put(&qh->kref, qh_destroy);
}
static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
int index;
spin_lock(&oxu->mem_lock);
index = murb - &oxu->murb_pool[0];
oxu->murb_used[index] = 0;
spin_unlock(&oxu->mem_lock);
}
static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
int i;
struct oxu_murb *murb = NULL;
spin_lock(&oxu->mem_lock);
for (i = 0; i < MURB_NUM; i++)
if (!oxu->murb_used[i])
break;
if (i < MURB_NUM) {
murb = &(oxu->murb_pool)[i];
oxu->murb_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
kfree(oxu->murb_pool);
oxu->murb_pool = NULL;
if (oxu->async)
qh_put(oxu->async);
oxu->async = NULL;
del_timer(&oxu->urb_timer);
oxu->periodic = NULL;
/* shadow periodic table */
kfree(oxu->pshadow);
oxu->pshadow = NULL;
}
/* Remember to add cleanup code (above) if you add anything here.
*/
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
int i;
for (i = 0; i < oxu->periodic_size; i++)
oxu->mem->frame_list[i] = EHCI_LIST_END;
for (i = 0; i < QHEAD_NUM; i++)
oxu->qh_used[i] = 0;
for (i = 0; i < QTD_NUM; i++)
oxu->qtd_used[i] = 0;
oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
if (!oxu->murb_pool)
goto fail;
for (i = 0; i < MURB_NUM; i++)
oxu->murb_used[i] = 0;
oxu->async = oxu_qh_alloc(oxu);
if (!oxu->async)
goto fail;
oxu->periodic = (__le32 *) &oxu->mem->frame_list;
oxu->periodic_dma = virt_to_phys(oxu->periodic);
for (i = 0; i < oxu->periodic_size; i++)
oxu->periodic[i] = EHCI_LIST_END;
/* software shadow of hardware table */
oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
if (oxu->pshadow != NULL)
return 0;
fail:
oxu_dbg(oxu, "couldn't init memory\n");
ehci_mem_cleanup(oxu);
return -ENOMEM;
}
/* Fill a qtd, returning how much of the buffer we were able to queue up.
*/
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
int token, int maxpacket)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely(len < count)) /* ... iff needed */
count = len;
else {
buf += 0x1000;
buf &= ~0x0fff;
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
qtd->hw_buf[i] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
else
count = len;
}
/* short packets may only terminate transfers */
if (count != len)
count -= (count % maxpacket);
}
qtd->hw_token = cpu_to_le32((count << 16) | token);
qtd->length = count;
return count;
}
static inline void qh_update(struct oxu_hcd *oxu,
struct ehci_qh *qh, struct ehci_qtd *qtd)
{
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END;
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
unsigned is_out, epnum;
is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
usb_settoggle(qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb();
qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}
/* If it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
if (list_empty(&qh->qtd_list))
qtd = qh->dummy;
else {
qtd = list_entry(qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
qtd = NULL;
}
if (qtd)
qh_update(oxu, qh, qtd);
}
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
size_t length, u32 token)
{
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely(QTD_PID(token) != 2))
urb->actual_length += length - QTD_LENGTH(token);
/* don't modify error codes */
if (unlikely(urb->status != -EINPROGRESS))
return;
/* force cleanup after short read; not always an error */
if (unlikely(IS_SHORT_READ(token)))
urb->status = -EREMOTEIO;
/* serious "can't proceed" faults reported by the hardware */
if (token & QTD_STS_HALT) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
urb->status = -EOVERFLOW;
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
urb->status = -EPROTO;
} else if (token & QTD_STS_DBE) {
urb->status = (QTD_PID(token) == 1) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
/* timeout, bad crc, wrong PID, etc; retried */
if (QTD_CERR(token))
urb->status = -EPIPE;
else {
oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out");
urb->status = -EPROTO;
}
/* CERR nonzero + no errors + halt --> stall */
} else if (QTD_CERR(token))
urb->status = -EPIPE;
else /* unknown */
urb->status = -EPROTO;
oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
token, urb->status);
}
}
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
if (likely(urb->hcpriv != NULL)) {
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
}
qh_put(qh);
}
urb->hcpriv = NULL;
switch (urb->status) {
case -EINPROGRESS: /* success */
urb->status = 0;
default: /* fault */
break;
case -EREMOTEIO: /* fault or normal */
if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = 0;
break;
case -ECONNRESET: /* canceled */
case -ENOENT:
break;
}
#ifdef OXU_URB_TRACE
oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->status,
urb->actual_length, urb->transfer_buffer_length);
#endif
/* complete() can reenter this HCD */
spin_unlock(&oxu->lock);
usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
spin_lock(&oxu->lock);
}
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
#define HALT_BIT cpu_to_le32(QTD_STS_HALT)
/* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *last = NULL, *end = qh->dummy;
struct ehci_qtd *qtd, *tmp;
int stopped;
unsigned count = 0;
int do_status = 0;
u8 state;
struct oxu_murb *murb = NULL;
if (unlikely(list_empty(&qh->qtd_list)))
return count;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
struct urb *urb;
u32 token = 0;
urb = qtd->urb;
/* Clean up any state from previous QTD ...*/
if (last) {
if (likely(last->urb != urb)) {
if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main;
if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
}
oxu_qtd_free(oxu, last);
last = NULL;
}
/* ignore urbs submitted during completions we reported */
if (qtd == end)
break;
/* hardware copies qtd out of qh overlay */
rmb();
token = le32_to_cpu(qtd->hw_token);
/* always clean up qtds the hc de-activated */
if ((token & QTD_STS_ACTIVE) == 0) {
if ((token & QTD_STS_HALT) != 0) {
stopped = 1;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
*/
} else if (IS_SHORT_READ(token) &&
!(qtd->hw_alt_next & EHCI_LIST_END)) {
stopped = 1;
goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely(!stopped &&
HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
break;
} else {
stopped = 1;
if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
urb->status = -ESHUTDOWN;
/* ignore active urbs unless some previous qtd
* for the urb faulted (including short read) or
* its urb was canceled. we may patch qh or qtds.
*/
if (likely(urb->status == -EINPROGRESS))
continue;
/* issue status after short control reads */
if (unlikely(do_status != 0)
&& QTD_PID(token) == 0 /* OUT */) {
do_status = 0;
continue;
}
/* token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_le32(qtd->qtd_dma)
== qh->hw_current)
token = le32_to_cpu(qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
if ((HALT_BIT & qh->hw_token) == 0) {
halt:
qh->hw_token |= HALT_BIT;
wmb();
}
}
/* Remove it from the queue */
qtd_copy_status(oxu, urb->complete ?
urb : ((struct oxu_murb *) urb)->main,
qtd->length, token);
if ((usb_pipein(qtd->urb->pipe)) &&
(NULL != qtd->transfer_buffer))
memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
do_status = (urb->status == -EREMOTEIO)
&& usb_pipecontrol(urb->pipe);
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry(qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
list_del(&qtd->qtd_list);
last = qtd;
}
/* last urb's completion might still need calling */
if (likely(last != NULL)) {
if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main;
if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_qtd_free(oxu, last);
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(oxu, qh);
break;
case QH_STATE_LINKED:
/* should be rare for periodic transfers,
* except maybe high bandwidth ...
*/
if ((cpu_to_le32(QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule(oxu, qh);
(void) qh_schedule(oxu, qh);
} else
unlink_async(oxu, qh);
break;
/* otherwise, unlink already started */
}
}
return count;
}
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
/* Reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *head)
{
struct ehci_qtd *qtd, *temp;
list_for_each_entry_safe(qtd, temp, head, qtd_list) {
list_del(&qtd->qtd_list);
oxu_qtd_free(oxu, qtd);
}
}
/* Create a list of filled qtds for this URB; won't link into qh.
*/
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
struct urb *urb,
struct list_head *head,
gfp_t flags)
{
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
int len, maxpacket;
int is_input;
u32 token;
void *transfer_buf = NULL;
int ret;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
return NULL;
list_add_tail(&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
/* for split transactions, SplitXState initialized to zero */
len = urb->transfer_buffer_length;
is_input = usb_pipein(urb->pipe);
if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
if (usb_pipecontrol(urb->pipe)) {
/* SETUP pid */
ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
if (ret)
goto cleanup;
qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8), 8);
memcpy(qtd->buffer, qtd->urb->setup_packet,
sizeof(struct usb_ctrlrequest));
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
if (len == 0)
token |= (1 /* "in" */ << 8);
}
/*
* Data transfer stage: buffer setup
*/
ret = oxu_buf_alloc(oxu, qtd, len);
if (ret)
goto cleanup;
buf = qtd->buffer_dma;
transfer_buf = urb->transfer_buffer;
if (!is_input)
memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
if (is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
for (;;) {
int this_qtd_len;
this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
qtd->transfer_buffer = transfer_buf;
len -= this_qtd_len;
buf += this_qtd_len;
transfer_buf += this_qtd_len;
if (is_input)
qtd->hw_alt_next = oxu->async->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely(len <= 0))
break;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
if (likely(len > 0)) {
ret = oxu_buf_alloc(oxu, qtd, len);
if (ret)
goto cleanup;
}
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
}
/* unless the bulk/interrupt caller wants a chance to clean
* up after short reads, hc should advance qh past this urb
*/
if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol(urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END;
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
*/
if (likely(urb->transfer_buffer_length != 0)) {
int one_more = 0;
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipebulk(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu);
if (unlikely(!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill(qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
qtd->hw_token |= cpu_to_le32(QTD_IOC);
return head;
cleanup:
qtd_list_free(oxu, urb, head);
return NULL;
}
/* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
struct urb *urb, gfp_t flags)
{
struct ehci_qh *qh = oxu_qh_alloc(oxu);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
if (!qh)
return qh;
/*
* init endpoint/device data for this QH
*/
info1 |= usb_pipeendpoint(urb->pipe) << 8;
info1 |= usb_pipedevice(urb->pipe) << 0;
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
qh->period = urb->interval >> 3;
if (qh->period == 0 && urb->interval != 1) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
urb->interval);
goto done;
}
} else {
struct usb_tt *tt = urb->dev->tt;
int think_time;
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
qh->c_usecs = qh->usecs + HS_USECS(0);
qh->usecs = HS_USECS(1);
} else { /* SPLIT+DATA, gap, CSPLIT */
qh->usecs += HS_USECS(1);
qh->c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
qh->tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time(urb->dev->speed,
is_input, 0, max_packet(maxp)));
qh->period = urb->interval;
}
}
/* support for tt scheduling, and access to toggles */
qh->dev = urb->dev;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
info1 |= (1 << 12); /* EPS "low" */
/* FALL THROUGH */
case USB_SPEED_FULL:
/* EPS 0 means "full" */
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
info1 |= max_packet(maxp) << 16;
info2 |= hb_mult(maxp) << 30;
}
break;
default:
oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
done:
qh_put(qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32(info1);
qh->hw_info2 = cpu_to_le32(info2);
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
qh_refresh(oxu, qh);
return qh;
}
/* Move qh (and its qtds) onto async queue; maybe enable queue.
*/
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
__le32 dma = QH_NEXT(qh->qh_dma);
struct ehci_qh *head;
/* (re)start the async schedule? */
head = oxu->async;
timer_action_done(oxu, TIMER_ASYNC_OFF);
if (!head->qh_next.qh) {
u32 cmd = readl(&oxu->regs->command);
if (!(cmd & CMD_ASE)) {
/* in case a clear of CMD_ASE didn't take yet */
(void)handshake(oxu, &oxu->regs->status,
STS_ASS, 0, 150);
cmd |= CMD_ASE | CMD_RUN;
writel(cmd, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
}
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */
if (qh->qh_state == QH_STATE_IDLE)
qh_refresh(oxu, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
qh->hw_next = head->hw_next;
wmb();
head->qh_next.qh = qh;
head->hw_next = dma;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
}
#define QH_ADDR_MASK cpu_to_le32(0x7f)
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *qtd_list,
int epnum, void **ptr)
{
struct ehci_qh *qh = NULL;
qh = (struct ehci_qh *) *ptr;
if (unlikely(qh == NULL)) {
/* can't sleep here, we have oxu->lock... */
qh = qh_make(oxu, urb, GFP_ATOMIC);
*ptr = qh;
}
if (likely(qh != NULL)) {
struct ehci_qtd *qtd;
if (unlikely(list_empty(qtd_list)))
qtd = NULL;
else
qtd = list_entry(qtd_list->next, struct ehci_qtd,
qtd_list);
/* control qh may need patching ... */
if (unlikely(epnum == 0)) {
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice(urb->pipe) == 0)
qh->hw_info1 &= ~QH_ADDR_MASK;
}
/* just one way to queue requests: swap with the dummy qtd.
* only hc or qh_refresh() ever modify the overlay.
*/
if (likely(qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
__le32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
* tds stay deactivated until we're done, when the
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT;
wmb();
dummy = qh->dummy;
dma = dummy->qtd_dma;
*dummy = *qtd;
dummy->qtd_dma = dma;
list_del(&qtd->qtd_list);
list_add(&dummy->qtd_list, qtd_list);
list_splice(qtd_list, qh->qtd_list.prev);
ehci_qtd_init(qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry(qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT(dma);
/* let the hc process these next qtds */
dummy->hw_token = (token & ~(0x80));
wmb();
dummy->hw_token = token;
urb->hcpriv = qh_get(qh);
}
}
return qh;
}
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags)
{
struct ehci_qtd *qtd;
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc = 0;
qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
epnum = urb->ep->desc.bEndpointAddress;
#ifdef OXU_URB_TRACE
oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
__func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave(&oxu->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
rc = -ESHUTDOWN;
goto done;
}
qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
rc = -ENOMEM;
goto done;
}
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely(qh->qh_state == QH_STATE_IDLE))
qh_link_async(oxu, qh_get(qh));
done:
spin_unlock_irqrestore(&oxu->lock, flags);
if (unlikely(qh == NULL))
qtd_list_free(oxu, urb, qtd_list);
return rc;
}
/* The async qh for the qtds being reclaimed are now unlinked from the HC */
static void end_unlink_async(struct oxu_hcd *oxu)
{
struct ehci_qh *qh = oxu->reclaim;
struct ehci_qh *next;
timer_action_done(oxu, TIMER_IAA_WATCHDOG);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put(qh); /* refcount from reclaim */
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
oxu->reclaim = next;
oxu->reclaim_ready = 0;
qh->reclaim = NULL;
qh_completions(oxu, qh);
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
qh_link_async(oxu, qh);
else {
qh_put(qh); /* refcount from async list */
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
&& oxu->async->qh_next.qh == NULL)
timer_action(oxu, TIMER_ASYNC_OFF);
}
if (next) {
oxu->reclaim = NULL;
start_unlink_async(oxu, next);
}
}
/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int cmd = readl(&oxu->regs->command);
struct ehci_qh *prev;
#ifdef DEBUG
assert_spin_locked(&oxu->lock);
BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
&& qh->qh_state != QH_STATE_UNLINK_WAIT));
#endif
/* stop async schedule right now? */
if (unlikely(qh == oxu->async)) {
/* can't get here without STS_ASS set */
if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
&& !oxu->reclaim) {
/* ... and CMD_IAAD clear */
writel(cmd & ~CMD_ASE, &oxu->regs->command);
wmb();
/* handshake later, if we need to */
timer_action_done(oxu, TIMER_ASYNC_OFF);
}
return;
}
qh->qh_state = QH_STATE_UNLINK;
oxu->reclaim = qh = qh_get(qh);
prev = oxu->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw_next = qh->hw_next;
prev->qh_next = qh->qh_next;
wmb();
if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
/* if (unlikely(qh->reclaim != 0))
* this will recurse, probably not much
*/
end_unlink_async(oxu);
return;
}
oxu->reclaim_ready = 0;
cmd |= CMD_IAAD;
writel(cmd, &oxu->regs->command);
(void) readl(&oxu->regs->command);
timer_action(oxu, TIMER_IAA_WATCHDOG);
}
static void scan_async(struct oxu_hcd *oxu)
{
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
if (!++(oxu->stamp))
oxu->stamp++;
timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
qh = oxu->async->qh_next.qh;
if (likely(qh != NULL)) {
do {
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)
&& qh->stamp != oxu->stamp) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
* qhs we already finished (no looping).
*/
qh = qh_get(qh);
qh->stamp = oxu->stamp;
temp = qh_completions(oxu, qh);
qh_put(qh);
if (temp != 0)
goto rescan;
}
/* unlink idle entries, reducing HC PCI usage as well
* as HCD schedule-scanning costs. delay for any qh
* we just scanned, there's a not-unusual case that it
* doesn't stay idle for long.
* (plus, avoids some kind of re-activation race.)
*/
if (list_empty(&qh->qtd_list)) {
if (qh->stamp == oxu->stamp)
action = TIMER_ASYNC_SHRINK;
else if (!oxu->reclaim
&& qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
qh = qh->qh_next.qh;
} while (qh);
}
if (action == TIMER_ASYNC_SHRINK)
timer_action(oxu, TIMER_ASYNC_SHRINK);
}
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
__le32 tag)
{
switch (tag) {
default:
case Q_TYPE_QH:
return &periodic->qh->qh_next;
}
}
/* caller must hold oxu->lock */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &oxu->pshadow[frame];
__le32 *hw_p = &oxu->periodic[frame];
union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
hw_p = here.hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr)
return;
/* update shadow and hardware lists ... the old "next" pointers
* from ptr may still be in use, the caller updates them.
*/
*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
*hw_p = *here.hw_next;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe)
{
__le32 *hw_p = &oxu->periodic[frame];
union ehci_shadow *q = &oxu->pshadow[frame];
unsigned usecs = 0;
while (q->ptr) {
switch (Q_NEXT_TYPE(*hw_p)) {
case Q_TYPE_QH:
default:
/* is it in the S-mask? */
if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
usecs += q->qh->c_usecs;
hw_p = &q->qh->hw_next;
q = &q->qh->qh_next;
break;
}
}
#ifdef DEBUG
if (usecs > 100)
oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
#endif
return usecs;
}
static int enable_periodic(struct oxu_hcd *oxu)
{
u32 cmd;
int status;
/* did clearing PSE did take effect yet?
* takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu));
return status;
}
cmd = readl(&oxu->regs->command) | CMD_PSE;
writel(cmd, &oxu->regs->command);
/* posted write ... PSS happens later */
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
/* make sure ehci_work scans these */
oxu->next_uframe = readl(&oxu->regs->frame_index)
% (oxu->periodic_size << 3);
return 0;
}
static int disable_periodic(struct oxu_hcd *oxu)
{
u32 cmd;
int status;
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu));
return status;
}
cmd = readl(&oxu->regs->command) & ~CMD_PSE;
writel(cmd, &oxu->regs->command);
/* posted write ... */
oxu->next_uframe = -1;
return 0;
}
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
*
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; oxu 0.96+)
*/
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->period;
dev_dbg(&qh->dev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period) {
union ehci_shadow *prev = &oxu->pshadow[i];
__le32 *hw_p = &oxu->periodic[i];
union ehci_shadow here = *prev;
__le32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
type = Q_NEXT_TYPE(*hw_p);
if (type == Q_TYPE_QH)
break;
prev = periodic_next_shadow(prev, type);
hw_p = &here.qh->hw_next;
here = *prev;
}
/* sorting each branch by period (slow-->fast)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
if (qh->period > here.qh->period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
qh->hw_next = *hw_p;
wmb();
prev->qh = qh;
*hw_p = QH_NEXT(qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh_get(qh);
/* update per-qh bandwidth for usbfs */
oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
/* maybe enable periodic schedule processing */
if (!oxu->periodic_sched++)
return enable_periodic(oxu);
return 0;
}
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
/* FIXME:
* IF this isn't high speed
* and this qh is active in the current uframe
* (and overlay token SplitXstate is false?)
* THEN
* qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
*/
/* high bandwidth, or otherwise part of every microframe */
period = qh->period;
if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period)
periodic_unlink(oxu, i, qh);
/* update per-qh bandwidth for usbfs */
oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
dev_dbg(&qh->dev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
qh_put(qh);
/* maybe turn off periodic schedule */
oxu->periodic_sched--;
if (!oxu->periodic_sched)
(void) disable_periodic(oxu);
}
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
unsigned wait;
qh_unlink_periodic(oxu, qh);
/* simple/paranoid: always delay, expecting the HC needs to read
* qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
* expect hub_wq to clean up after any CSPLITs we won't issue.
* active high speed queues may need bigger delays...
*/
if (list_empty(&qh->qtd_list)
|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
wait = 2;
else
wait = 55; /* worst case: 3 * 1024 */
udelay(wait);
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
wmb();
}
static int check_period(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe,
unsigned period, unsigned usecs)
{
int claimed;
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
/*
* 80% periodic == 100 usec/uframe available
* convert "usecs we need" to "max already claimed"
*/
usecs = 100 - usecs;
/* we "know" 2 and 4 uframe intervals were rejected; so
* for period 0, check _every_ microframe in the schedule.
*/
if (unlikely(period == 0)) {
do {
for (uframe = 0; uframe < 7; uframe++) {
claimed = periodic_usecs(oxu, frame, uframe);
if (claimed > usecs)
return 0;
}
} while ((frame += 1) < oxu->periodic_size);
/* just check the specified uframe, at that period */
} else {
do {
claimed = periodic_usecs(oxu, frame, uframe);
if (claimed > usecs)
return 0;
} while ((frame += period) < oxu->periodic_size);
}
return 1;
}
static int check_intr_schedule(struct oxu_hcd *oxu,
unsigned frame, unsigned uframe,
const struct ehci_qh *qh, __le32 *c_maskp)
{
int retval = -ENOSPC;
if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
done:
return retval;
}
/* "first fit" scheduling policy used the first time through,
* or when the previous schedule slot can't be re-used.
*/
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
int status;
unsigned uframe;
__le32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
qh_refresh(oxu, qh);
qh->hw_next = EHCI_LIST_END;
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
status = check_intr_schedule(oxu, frame, --uframe,
qh, &c_mask);
} else {
uframe = 0;
c_mask = 0;
status = -ENOSPC;
}
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
frame = qh->period - 1;
do {
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule(oxu,
frame, uframe, qh,
&c_mask);
if (status == 0)
break;
}
} while (status && frame--);
/* qh->period == 0 means every uframe */
} else {
frame = 0;
status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
}
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
qh->hw_info2 |= qh->period
? cpu_to_le32(1 << uframe)
: cpu_to_le32(QH_SMASK);
qh->hw_info2 |= c_mask;
} else
oxu_dbg(oxu, "reused qh %p schedule\n", qh);
/* stuff into the periodic schedule */
status = qh_link_periodic(oxu, qh);
done:
return status;
}
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags)
{
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
int status = 0;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&oxu->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
status = -ESHUTDOWN;
goto done;
}
/* get qh and force any scheduling errors */
INIT_LIST_HEAD(&empty);
qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
if (qh->qh_state == QH_STATE_IDLE) {
status = qh_schedule(oxu, qh);
if (status != 0)
goto done;
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON(qh == NULL);
/* ... update usbfs periodic stats */
oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
done:
spin_unlock_irqrestore(&oxu->lock, flags);
if (status)
qtd_list_free(oxu, urb, qtd_list);
return status;
}
static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
gfp_t mem_flags)
{
oxu_dbg(oxu, "iso support is missing!\n");
return -ENOSYS;
}
static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
gfp_t mem_flags)
{
oxu_dbg(oxu, "split iso support is missing!\n");
return -ENOSYS;
}
static void scan_periodic(struct oxu_hcd *oxu)
{
unsigned frame, clock, now_uframe, mod;
unsigned modified;
mod = oxu->periodic_size << 3;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
now_uframe = oxu->next_uframe;
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
clock = readl(&oxu->regs->frame_index);
else
clock = now_uframe + mod - 1;
clock %= mod;
for (;;) {
union ehci_shadow q, *q_p;
__le32 type, *hw_p;
unsigned uframes;
/* don't scan past the live uframe */
frame = now_uframe >> 3;
if (frame == (clock >> 3))
uframes = now_uframe & 0x07;
else {
/* safe to scan the whole frame at once */
now_uframe |= 0x07;
uframes = 8;
}
restart:
/* scan each element in frame's queue for completions */
q_p = &oxu->pshadow[frame];
hw_p = &oxu->periodic[frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(*hw_p);
modified = 0;
while (q.ptr != NULL) {
union ehci_shadow temp;
switch (type) {
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get(q.qh);
type = Q_NEXT_TYPE(q.qh->hw_next);
q = q.qh->qh_next;
modified = qh_completions(oxu, temp.qh);
if (unlikely(list_empty(&temp.qh->qtd_list)))
intr_deschedule(oxu, temp.qh);
qh_put(temp.qh);
break;
default:
oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
q.ptr = NULL;
}
/* assume completion callbacks modify the queue */
if (unlikely(modified))
goto restart;
}
/* Stop when we catch up to the HC */
/* FIXME: this assumes we won't get lapped when
* latencies climb; that should be rare, but...
* detect it, and just go all the way around.
* FLR might help detect this case, so long as latencies
* don't exceed periodic_size msec (default 1.024 sec).
*/
/* FIXME: likewise assumes HC doesn't halt mid-scan */
if (now_uframe == clock) {
unsigned now;
if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
break;
oxu->next_uframe = now_uframe;
now = readl(&oxu->regs->frame_index) % mod;
if (now_uframe == now)
break;
/* rescan the rest of this frame, then ... */
clock = now;
} else {
now_uframe++;
now_uframe %= mod;
}
}
}
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
*/
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
int port = HCS_N_PORTS(oxu->hcs_params);
while (port--)
writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}
static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
unsigned port;
if (!HCS_PPC(oxu->hcs_params))
return;
oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
(void) oxu_hub_control(oxu_to_hcd(oxu),
is_on ? SetPortFeature : ClearPortFeature,
USB_PORT_FEAT_POWER,
port--, NULL, 0);
msleep(20);
}
/* Called from some interrupts, timers, and so on.
* It calls driver completion functions, after dropping oxu->lock.
*/
static void ehci_work(struct oxu_hcd *oxu)
{
timer_action_done(oxu, TIMER_IO_WATCHDOG);
if (oxu->reclaim_ready)
end_unlink_async(oxu);
/* another CPU may drop oxu->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
if (oxu->scanning)
return;
oxu->scanning = 1;
scan_async(oxu);
if (oxu->next_uframe != -1)
scan_periodic(oxu);
oxu->scanning = 0;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
(oxu->async->qh_next.ptr != NULL ||
oxu->periodic_sched != 0))
timer_action(oxu, TIMER_IO_WATCHDOG);
}
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
/* if we need to use IAA and it's busy, defer */
if (qh->qh_state == QH_STATE_LINKED
&& oxu->reclaim
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
struct ehci_qh *last;
for (last = oxu->reclaim;
last->reclaim;
last = last->reclaim)
continue;
qh->qh_state = QH_STATE_UNLINK_WAIT;
last->reclaim = qh;
/* bypass IAA if the hc can't care */
} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
end_unlink_async(oxu);
/* something else might have unlinked the qh by now */
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
/*
* USB host controller methods
*/
static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 status, pcd_status = 0;
int bh;
spin_lock(&oxu->lock);
status = readl(&oxu->regs->status);
/* e.g. cardbus physical eject */
if (status == ~(u32) 0) {
oxu_dbg(oxu, "device removed\n");
goto dead;
}
/* Shared IRQ? */
status &= INTR_MASK;
if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
spin_unlock(&oxu->lock);
return IRQ_NONE;
}
/* clear (just) interrupts */
writel(status, &oxu->regs->status);
readl(&oxu->regs->command); /* unblock posted write */
bh = 0;
#ifdef OXU_VERBOSE_DEBUG
/* unrequested/ignored: Frame List Rollover */
dbg_status(oxu, "irq", status);
#endif
/* INT, ERR, and IAA interrupt rates can be throttled */
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely((status & (STS_INT|STS_ERR)) != 0))
bh = 1;
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
oxu->reclaim_ready = 1;
bh = 1;
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS(oxu->hcs_params);
pcd_status = status;
/* resume root hub? */
if (!(readl(&oxu->regs->command) & CMD_RUN))
usb_hcd_resume_root_hub(hcd);
while (i--) {
int pstatus = readl(&oxu->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
if (!(pstatus & PORT_RESUME)
|| oxu->reset_done[i] != 0)
continue;
/* start USB_RESUME_TIMEOUT resume signaling from this
* port, and make hub_wq collect PORT_STAT_C_SUSPEND to
* stop that signaling.
*/
oxu->reset_done[i] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
}
}
/* PCI errors [4.15.2.4] */
if (unlikely((status & STS_FATAL) != 0)) {
/* bogus "fatal" IRQs appear on some chips... why? */
status = readl(&oxu->regs->status);
dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
dbg_status(oxu, "fatal", status);
if (status & STS_HALT) {
oxu_err(oxu, "fatal error\n");
dead:
ehci_reset(oxu);
writel(0, &oxu->regs->configured_flag);
usb_hc_died(hcd);
/* generic layer kills/unlinks all urbs, then
* uses oxu_stop to clean up the rest
*/
bh = 1;
}
}
if (bh)
ehci_work(oxu);
spin_unlock(&oxu->lock);
if (pcd_status & STS_PCD)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
}
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ret = IRQ_HANDLED;
u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
/* Disable all interrupt */
oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
(!oxu->is_otg && (status & OXU_USBSPHI)))
oxu210_hcd_irq(hcd);
else
ret = IRQ_NONE;
/* Enable all interrupt back */
oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
return ret;
}
static void oxu_watchdog(unsigned long param)
{
struct oxu_hcd *oxu = (struct oxu_hcd *) param;
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
/* lost IAA irqs wedge things badly; seen with a vt8235 */
if (oxu->reclaim) {
u32 status = readl(&oxu->regs->status);
if (status & STS_IAA) {
oxu_vdbg(oxu, "lost IAA\n");
writel(STS_IAA, &oxu->regs->status);
oxu->reclaim_ready = 1;
}
}
/* stop async processing after it's idled a bit */
if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
start_unlink_async(oxu, oxu->async);
/* oxu could run by timer, without IRQs ... */
ehci_work(oxu);
spin_unlock_irqrestore(&oxu->lock, flags);
}
/* One-time init, only for memory state.
*/
static int oxu_hcd_init(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp;
int retval;
u32 hcc_params;
spin_lock_init(&oxu->lock);
setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
oxu->periodic_size = DEFAULT_I_TDPS;
retval = ehci_mem_init(oxu, GFP_KERNEL);
if (retval < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
hcc_params = readl(&oxu->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */
oxu->i_thresh = 8;
else /* N microframes cached */
oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
oxu->reclaim = NULL;
oxu->reclaim_ready = 0;
oxu->next_uframe = -1;
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
* as the 'reclamation list head' too.
* its dummy is used in hw_alt_next of many tds, to prevent the qh
* from automatically advancing to the next td after short reads.
*/
oxu->async->qh_next.qh = NULL;
oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
oxu->async->hw_qtd_next = EHCI_LIST_END;
oxu->async->qh_state = QH_STATE_LINKED;
oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
* schedule by avoiding QH fetches between transfers.
*
* With fast usb storage devices and NForce2, "park" seems to
* make problems: throughput reduction (!), data errors...
*/
if (park) {
park = min(park, (unsigned) 3);
temp |= CMD_PARK;
temp |= park << 8;
}
oxu_dbg(oxu, "park %d\n", park);
}
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
}
oxu->command = temp;
return 0;
}
/* Called during probe() after chip reset completes.
*/
static int oxu_reset(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
spin_lock_init(&oxu->mem_lock);
INIT_LIST_HEAD(&oxu->urb_list);
oxu->urb_len = 0;
/* FIMXE */
hcd->self.controller->dma_mask = NULL;
if (oxu->is_otg) {
oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
HC_LENGTH(readl(&oxu->caps->hc_capbase));
oxu->mem = hcd->regs + OXU_SPH_MEM;
} else {
oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
HC_LENGTH(readl(&oxu->caps->hc_capbase));
oxu->mem = hcd->regs + OXU_OTG_MEM;
}
oxu->hcs_params = readl(&oxu->caps->hcs_params);
oxu->sbrn = 0x20;
return oxu_hcd_init(hcd);
}
static int oxu_run(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int retval;
u32 temp, hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
retval = ehci_reset(oxu);
if (retval != 0) {
ehci_mem_cleanup(oxu);
return retval;
}
writel(oxu->periodic_dma, &oxu->regs->frame_list);
writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
/* hcc_params controls whether oxu->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* pci_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
*/
hcc_params = readl(&oxu->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params))
writel(0, &oxu->regs->segment);
oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
CMD_ASE | CMD_RESET);
oxu->command |= CMD_RUN;
writel(oxu->command, &oxu->regs->command);
dbg_cmd(oxu, "init", oxu->command);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
* involved with the root hub. (Except where one is integrated,
* and there's no companion controller unless maybe for USB OTG.)
*/
hcd->state = HC_STATE_RUNNING;
writel(FLAG_CF, &oxu->regs->configured_flag);
readl(&oxu->regs->command); /* unblock posted writes */
temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
temp >> 8, temp & 0xff, DRIVER_VERSION,
ignore_oc ? ", overcurrent ignored" : "");
writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
return 0;
}
static void oxu_stop(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
/* Turn off port power on all root hub ports. */
ehci_port_power(oxu, 0);
/* no more interrupts ... */
del_timer_sync(&oxu->watchdog);
spin_lock_irq(&oxu->lock);
if (HC_IS_RUNNING(hcd->state))
ehci_quiesce(oxu);
ehci_reset(oxu);
writel(0, &oxu->regs->intr_enable);
spin_unlock_irq(&oxu->lock);
/* let companion controllers work when we aren't */
writel(0, &oxu->regs->configured_flag);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq(&oxu->lock);
if (oxu->async)
ehci_work(oxu);
spin_unlock_irq(&oxu->lock);
ehci_mem_cleanup(oxu);
dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}
/* Kick in for silicon on any bus (not just pci, etc).
* This forcibly disables dma and IRQs, helping kexec and other cases
* where the next system software may expect clean state.
*/
static void oxu_shutdown(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
(void) ehci_halt(oxu);
ehci_turn_off_all_ports(oxu);
/* make BIOS/etc use companion controller during reboot */
writel(0, &oxu->regs->configured_flag);
/* unblock posted writes */
readl(&oxu->regs->configured_flag);
}
/* Non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*
* urb + dev is in hcd.self.controller.urb_list
* we're queueing TDs onto software and hardware lists
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
struct list_head qtd_list;
INIT_LIST_HEAD(&qtd_list);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
default:
if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async(oxu, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
return -ENOMEM;
return intr_submit(oxu, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
return itd_submit(oxu, urb, mem_flags);
else
return sitd_submit(oxu, urb, mem_flags);
}
}
/* This function is responsible for breaking URBs with big data size
* into smaller size and processing small urbs in sequence.
*/
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int num, rem;
int transfer_buffer_length;
void *transfer_buffer;
struct urb *murb;
int i, ret;
/* If not bulk pipe just enqueue the URB */
if (!usb_pipebulk(urb->pipe))
return __oxu_urb_enqueue(hcd, urb, mem_flags);
/* Otherwise we should verify the USB transfer buffer size! */
transfer_buffer = urb->transfer_buffer;
transfer_buffer_length = urb->transfer_buffer_length;
num = urb->transfer_buffer_length / 4096;
rem = urb->transfer_buffer_length % 4096;
if (rem != 0)
num++;
/* If URB is smaller than 4096 bytes just enqueue it! */
if (num == 1)
return __oxu_urb_enqueue(hcd, urb, mem_flags);
/* Ok, we have more job to do! :) */
for (i = 0; i < num - 1; i++) {
/* Get free micro URB poll till a free urb is received */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
/* Coping the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = 4096;
murb->transfer_buffer = transfer_buffer + i * 4096;
/* Null pointer for the encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 0;
/* This loop is to guarantee urb to be processed when there's
* not enough resources at a particular time by retrying.
*/
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
}
/* Last urb requires special handling */
/* Get free micro URB poll till a free urb is received */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
/* Coping the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = rem > 0 ? rem : 4096;
murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
/* Null pointer for the encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 1;
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
return ret;
}
/* Remove from hardware lists.
* Completions normally happen asynchronously
*/
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
struct ehci_qh *qh;
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
default:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
unlink_async(oxu, qh);
break;
case PIPE_INTERRUPT:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
switch (qh->qh_state) {
case QH_STATE_LINKED:
intr_deschedule(oxu, qh);
/* FALL THROUGH */
case QH_STATE_IDLE:
qh_completions(oxu, qh);
break;
default:
oxu_dbg(oxu, "bogus qh %p state %d\n",
qh, qh->qh_state);
goto done;
}
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(hcd->state)) {
int status;
status = qh_schedule(oxu, qh);
spin_unlock_irqrestore(&oxu->lock, flags);
if (status != 0) {
/* shouldn't happen often, but ...
* FIXME kill those tds' urbs
*/
dev_err(hcd->self.controller,
"can't reschedule qh %p, err %d\n", qh,
status);
}
return status;
}
break;
}
done:
spin_unlock_irqrestore(&oxu->lock, flags);
return 0;
}
/* Bulk qh holds the data toggle */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
unsigned long flags;
struct ehci_qh *qh, *tmp;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
rescan:
spin_lock_irqsave(&oxu->lock, flags);
qh = ep->hcpriv;
if (!qh)
goto done;
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
if (qh->hw_info1 == 0) {
oxu_vdbg(oxu, "iso delay\n");
goto idle_timeout;
}
if (!HC_IS_RUNNING(hcd->state))
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
for (tmp = oxu->async->qh_next.qh;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
continue;
/* periodic qh self-unlinks on empty */
if (!tmp)
goto nogood;
unlink_async(oxu, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
idle_timeout:
spin_unlock_irqrestore(&oxu->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
if (list_empty(&qh->qtd_list)) {
qh_put(qh);
break;
}
/* else FALL THROUGH */
default:
nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty(&qh->qtd_list) ? "" : "(has tds)");
break;
}
ep->hcpriv = NULL;
done:
spin_unlock_irqrestore(&oxu->lock, flags);
}
static int oxu_get_frame(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
return (readl(&oxu->regs->frame_index) >> 3) %
oxu->periodic_size;
}
/* Build "status change" packet (one or two bytes) from HC registers */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp, mask, status = 0;
int ports, i, retval = 1;
unsigned long flags;
/* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
/* init status to no-changes */
buf[0] = 0;
ports = HCS_N_PORTS(oxu->hcs_params);
if (ports > 7) {
buf[1] = 0;
retval++;
}
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
if (!ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
/* no hub change reports (bit 0) for now (power, ...) */
/* port N changes (bit N)? */
spin_lock_irqsave(&oxu->lock, flags);
for (i = 0; i < ports; i++) {
temp = readl(&oxu->regs->port_status[i]);
/*
* Return status information even for ports with OWNER set.
* Otherwise hub_wq wouldn't see the disconnect event when a
* high-speed device is switched over to the companion
* controller by the user.
*/
if (!(temp & PORT_CONNECT))
oxu->reset_done[i] = 0;
if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
time_after_eq(jiffies, oxu->reset_done[i]))) {
if (i < 7)
buf[0] |= 1 << (i + 1);
else
buf[1] |= 1 << (i - 7);
status = STS_PCD;
}
}
/* FIXME autosuspend idle root hubs */
spin_unlock_irqrestore(&oxu->lock, flags);
return status ? retval : 0;
}
/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
unsigned int portsc)
{
switch ((portsc >> 26) & 3) {
case 0:
return 0;
case 1:
return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
return USB_PORT_STAT_HIGH_SPEED;
}
}
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int ports = HCS_N_PORTS(oxu->hcs_params);
u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
u32 temp, status;
unsigned long flags;
int retval = 0;
unsigned selector;
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
* power, "this is the one", etc. EHCI spec supports this.
*/
spin_lock_irqsave(&oxu->lock, flags);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = readl(status_reg);
/*
* Even if OWNER is set, so the port is owned by the
* companion controller, hub_wq needs to be able to clear
* the port-change status bits (especially
* USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
writel(temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (temp & PORT_SUSPEND) {
if ((temp & PORT_PE) == 0)
goto error;
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
writel(temp | PORT_RESUME, status_reg);
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
/* we auto-clear this feature */
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(oxu->hcs_params))
writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
break;
default:
goto error;
}
readl(&oxu->regs->command); /* unblock posted write */
break;
case GetHubDescriptor:
ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
memset(buf, 0, 4);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
temp = readl(status_reg);
/* wPortChange bits */
if (temp & PORT_CSC)
status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc)
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
/* Remote Wakeup received? */
if (!oxu->reset_done[wIndex]) {
/* resume signaling for 20 msec */
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
mod_timer(&oxu_to_hcd(oxu)->rh_timer,
oxu->reset_done[wIndex]);
}
/* resume completed? */
else if (time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_SUSPEND << 16;
oxu->reset_done[wIndex] = 0;
/* stop resume signaling */
temp = readl(status_reg);
writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
status_reg);
retval = handshake(oxu, status_reg,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
oxu_err(oxu,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
}
}
/* whoever resets must GetPortStatus to complete it!! */
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
oxu->reset_done[wIndex] = 0;
/* force reset to complete */
writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
status_reg);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
retval = handshake(oxu, status_reg,
PORT_RESET, 0, 750);
if (retval != 0) {
oxu_err(oxu, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
temp = check_reset_complete(oxu, wIndex, status_reg,
readl(status_reg));
}
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &oxu->companion_ports)) {
temp &= ~PORT_RWC_BITS;
temp |= PORT_OWNER;
writel(temp, status_reg);
oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
temp = readl(status_reg);
}
/*
* Even if OWNER is set, there's no harm letting hub_wq
* see the wPortStatus values (they should all be 0 except
* for PORT_POWER anyway).
*/
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= oxu_port_speed(oxu, temp);
}
if (temp & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_OC)
status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
status |= USB_PORT_STAT_POWER;
#ifndef OXU_VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
#endif
dbg_port(oxu, "GetStatus", wIndex + 1, temp);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = readl(status_reg);
if (temp & PORT_OWNER)
break;
temp &= ~PORT_RWC_BITS;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
if (device_may_wakeup(&hcd->self.root_hub->dev))
temp |= PORT_WAKE_BITS;
writel(temp | PORT_SUSPEND, status_reg);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(oxu->hcs_params))
writel(temp | PORT_POWER, status_reg);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
/*
* caller must wait, then call GetPortStatus
* usb 2.0 spec says 50 ms resets on root
*/
oxu->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(50);
writel(temp, status_reg);
break;
/* For downstream facing ports (these): one hub port is put
* into test mode according to USB2 11.24.2.13, then the hub
* must be reset (which for root hub now means rmmod+modprobe,
* or else system reboot). See EHCI 2.3.9 and 4.14 for info
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
if (!selector || selector > 5)
goto error;
ehci_quiesce(oxu);
ehci_halt(oxu);
temp |= selector << 16;
writel(temp, status_reg);
break;
default:
goto error;
}
readl(&oxu->regs->command); /* unblock posted writes */
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&oxu->lock, flags);
return retval;
}
#ifdef CONFIG_PM
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int port;
int mask;
oxu_dbg(oxu, "suspend root hub\n");
if (time_before(jiffies, oxu->next_statechange))
msleep(5);
port = HCS_N_PORTS(oxu->hcs_params);
spin_lock_irq(&oxu->lock);
/* stop schedules, clean any completed work */
if (HC_IS_RUNNING(hcd->state)) {
ehci_quiesce(oxu);
hcd->state = HC_STATE_QUIESCING;
}
oxu->command = readl(&oxu->regs->command);
if (oxu->reclaim)
oxu->reclaim_ready = 1;
ehci_work(oxu);
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
* then manually resume them in the bus_resume() routine.
*/
oxu->bus_suspended = 0;
while (port--) {
u32 __iomem *reg = &oxu->regs->port_status[port];
u32 t1 = readl(reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
/* keep track of which ports we suspend */
if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
!(t1 & PORT_SUSPEND)) {
t2 |= PORT_SUSPEND;
set_bit(port, &oxu->bus_suspended);
}
/* enable remote wakeup on all ports */
if (device_may_wakeup(&hcd->self.root_hub->dev))
t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
else
t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
if (t1 != t2) {
oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
writel(t2, reg);
}
}
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
/* allow remote wakeup */
mask = INTR_MASK;
if (!device_may_wakeup(&hcd->self.root_hub->dev))
mask &= ~STS_PCD;
writel(mask, &oxu->regs->intr_enable);
readl(&oxu->regs->intr_enable);
oxu->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irq(&oxu->lock);
return 0;
}
/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
u32 temp;
int i;
if (time_before(jiffies, oxu->next_statechange))
msleep(5);
spin_lock_irq(&oxu->lock);
/* Ideally and we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
* the last user of the controller, not reset/pm hardware keeping
* state we gave to it.
*/
temp = readl(&oxu->regs->intr_enable);
oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
*/
writel(0, &oxu->regs->intr_enable);
/* re-init operational registers */
writel(0, &oxu->regs->segment);
writel(oxu->periodic_dma, &oxu->regs->frame_list);
writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
writel(oxu->command, &oxu->regs->command);
/* Some controller/firmware combinations need a delay during which
* they set up the port statuses. See Bugzilla #8190. */
mdelay(8);
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS(oxu->hcs_params);
while (i--) {
temp = readl(&oxu->regs->port_status[i]);
temp &= ~(PORT_RWC_BITS
| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
temp |= PORT_RESUME;
}
writel(temp, &oxu->regs->port_status[i]);
}
i = HCS_N_PORTS(oxu->hcs_params);
mdelay(20);
while (i--) {
temp = readl(&oxu->regs->port_status[i]);
if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
temp &= ~(PORT_RWC_BITS | PORT_RESUME);
writel(temp, &oxu->regs->port_status[i]);
oxu_vdbg(oxu, "resumed port %d\n", i + 1);
}
}
(void) readl(&oxu->regs->command);
/* maybe re-activate the schedule(s) */
temp = 0;
if (oxu->async->qh_next.qh)
temp |= CMD_ASE;
if (oxu->periodic_sched)
temp |= CMD_PSE;
if (temp) {
oxu->command |= temp;
writel(oxu->command, &oxu->regs->command);
}
oxu->next_statechange = jiffies + msecs_to_jiffies(5);
hcd->state = HC_STATE_RUNNING;
/* Now we can safely re-enable irqs */
writel(INTR_MASK, &oxu->regs->intr_enable);
spin_unlock_irq(&oxu->lock);
return 0;
}
#else
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
return 0;
}
static int oxu_bus_resume(struct usb_hcd *hcd)
{
return 0;
}
#endif /* CONFIG_PM */
static const struct hc_driver oxu_hc_driver = {
.description = "oxu210hp_hcd",
.product_desc = "oxu210hp HCD",
.hcd_priv_size = sizeof(struct oxu_hcd),
/*
* Generic hardware linkage
*/
.irq = oxu_irq,
.flags = HCD_MEMORY | HCD_USB2,
/*
* Basic lifecycle operations
*/
.reset = oxu_reset,
.start = oxu_run,
.stop = oxu_stop,
.shutdown = oxu_shutdown,
/*
* Managing i/o requests and associated device resources
*/
.urb_enqueue = oxu_urb_enqueue,
.urb_dequeue = oxu_urb_dequeue,
.endpoint_disable = oxu_endpoint_disable,
/*
* Scheduling support
*/
.get_frame_number = oxu_get_frame,
/*
* Root hub support
*/
.hub_status_data = oxu_hub_status_data,
.hub_control = oxu_hub_control,
.bus_suspend = oxu_bus_suspend,
.bus_resume = oxu_bus_resume,
};
/*
* Module stuff
*/
static void oxu_configuration(struct platform_device *pdev, void *base)
{
u32 tmp;
/* Initialize top level registers.
* First write ever
*/
oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
OXU_COMPARATOR | OXU_ASO_OP);
tmp = oxu_readl(base, OXU_CLKCTRL_SET);
oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
/* Clear all top interrupt enable */
oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
/* Clear all top interrupt status */
oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
/* Enable all needed top interrupt except OTG SPH core */
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
u32 id;
static const char * const bo[] = {
"reserved",
"128-pin LQFP",
"84-pin TFBGA",
"reserved",
};
/* Read controller signature register to find a match */
id = oxu_readl(base, OXU_DEVICEID);
dev_info(&pdev->dev, "device ID %x\n", id);
if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
return -1;
dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
id >> OXU_REV_SHIFT,
bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
return 0;
}
static const struct hc_driver oxu_hc_driver;
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
void *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd;
struct oxu_hcd *oxu;
int ret;
/* Set endian mode and host mode */
oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
OXU_USBMODE,
OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
hcd = usb_create_hcd(&oxu_hc_driver, dev,
otg ? "oxu210hp_otg" : "oxu210hp_sph");
if (!hcd)
return ERR_PTR(-ENOMEM);
hcd->rsrc_start = memstart;
hcd->rsrc_len = memlen;
hcd->regs = base;
hcd->irq = irq;
hcd->state = HC_STATE_HALT;
oxu = hcd_to_oxu(hcd);
oxu->is_otg = otg;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret < 0)
return ERR_PTR(ret);
device_wakeup_enable(hcd->self.controller);
return hcd;
}
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
void *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
int ret;
/* First time configuration at start up */
oxu_configuration(pdev, base);
ret = oxu_verify_id(pdev, base);
if (ret) {
dev_err(&pdev->dev, "no devices found!\n");
return -ENODEV;
}
/* Create the OTG controller */
hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
if (IS_ERR(hcd)) {
dev_err(&pdev->dev, "cannot create OTG controller!\n");
ret = PTR_ERR(hcd);
goto error_create_otg;
}
info->hcd[0] = hcd;
/* Create the SPH host controller */
hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
if (IS_ERR(hcd)) {
dev_err(&pdev->dev, "cannot create SPH controller!\n");
ret = PTR_ERR(hcd);
goto error_create_sph;
}
info->hcd[1] = hcd;
oxu_writel(base, OXU_CHIPIRQEN_SET,
oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
return 0;
error_create_sph:
usb_remove_hcd(info->hcd[0]);
usb_put_hcd(info->hcd[0]);
error_create_otg:
return ret;
}
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
void *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
if (usb_disabled())
return -ENODEV;
/*
* Get the platform resources
*/
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev,
"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
return -ENODEV;
}
irq = res->start;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto error;
}
memstart = res->start;
memlen = resource_size(res);
ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
if (ret) {
dev_err(&pdev->dev, "error setting irq type\n");
ret = -EFAULT;
goto error;
}
/* Allocate a driver data struct to hold useful info for both
* SPH & OTG devices
*/
info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
if (!info) {
ret = -EFAULT;
goto error;
}
platform_set_drvdata(pdev, info);
ret = oxu_init(pdev, memstart, memlen, base, irq);
if (ret < 0) {
dev_dbg(&pdev->dev, "cannot init USB devices\n");
goto error;
}
dev_info(&pdev->dev, "devices enabled and running\n");
platform_set_drvdata(pdev, info);
return 0;
error:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
return ret;
}
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
}
static int oxu_drv_remove(struct platform_device *pdev)
{
struct oxu_info *info = platform_get_drvdata(pdev);
oxu_remove(pdev, info->hcd[0]);
oxu_remove(pdev, info->hcd[1]);
return 0;
}
static void oxu_drv_shutdown(struct platform_device *pdev)
{
oxu_drv_remove(pdev);
}
#if 0
/* FIXME: TODO */
static int oxu_drv_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
return 0;
}
static int oxu_drv_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
return 0;
}
#else
#define oxu_drv_suspend NULL
#define oxu_drv_resume NULL
#endif
static struct platform_driver oxu_driver = {
.probe = oxu_drv_probe,
.remove = oxu_drv_remove,
.shutdown = oxu_drv_shutdown,
.suspend = oxu_drv_suspend,
.resume = oxu_drv_resume,
.driver = {
.name = "oxu210hp-hcd",
.bus = &platform_bus_type
}
};
module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <[email protected]>");
MODULE_LICENSE("GPL");
|
1 | static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent){struct net_device *netdev;struct atl2_adapter *adapter;static int cards_found;unsigned long mmio_start;int mmio_len;int err;cards_found = 0;err = pci_enable_device(pdev);if (err)return err;/** atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA* until the kernel has the proper infrastructure to support 64-bit DMA* on these devices.*/if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");goto err_dma;}/* Mark all PCI regions associated with PCI device* pdev as being reserved by owner atl2_driver_name */err = pci_request_regions(pdev, atl2_driver_name);if (err)goto err_pci_reg;/* Enables bus-mastering on the device and calls* pcibios_set_master to do the needed arch specific settings */pci_set_master(pdev);err = -ENOMEM;netdev = alloc_etherdev(sizeof(struct atl2_adapter));if (!netdev)long int variable_9060261723178703496 = -6632577870690254517 * (4977113127343029580);
goto err_alloc_etherdev;SET_NETDEV_DEV(netdev, &pdev->dev);pci_set_drvdata(pdev, netdev);adapter = netdev_priv(netdev);adapter->netdev = netdev;adapter->pdev = pdev;adapter->hw.back = adapter;mmio_start = pci_resource_start(pdev, 0x0);mmio_len = pci_resource_len(pdev, 0x0);adapter->hw.mem_rang = (u32)mmio_len;adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);if (!adapter->hw.hw_addr) {err = -EIO;goto err_ioremap;}atl2_setup_pcicmd(pdev);netdev->netdev_ops = &atl2_netdev_ops;netdev->ethtool_ops = &atl2_ethtool_ops;netdev->watchdog_timeo = 5 * HZ;strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);netdev->mem_start = mmio_start;netdev->mem_end = mmio_start + mmio_len;adapter->bd_number = cards_found;adapter->pci_using_64 = false;/* setup the private structure */err = atl2_sw_init(adapter);if (err)goto err_sw_init;err = -EIO; netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);/* Init PHY as early as possible due to power saving issue */atl2_phy_init(&adapter->hw);/* reset the controller to* put the device in a known good starting state */if (atl2_reset_hw(&adapter->hw)) {err = -EIO;goto err_reset;}/* copy the MAC address out of the EEPROM */atl2_read_mac_addr(&adapter->hw);memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);if (!is_valid_ether_addr(netdev->dev_addr)) {err = -EIO;goto err_eeprom;}atl2_check_options(adapter);setup_timer(&adapter->watchdog_timer, atl2_watchdog,(unsigned long)adapter);setup_timer(&adapter->phy_config_timer, atl2_phy_config,(unsigned long)adapter);INIT_WORK(&adapter->reset_task, atl2_reset_task);INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);strcpy(netdev->name, "eth%d"); /* ?? */err = register_netdev(netdev);if (err)goto err_register;/* assume we have no link for now */netif_carrier_off(netdev);netif_stop_queue(netdev);cards_found++;return 0;err_reset:err_register:err_sw_init:err_eeprom:iounmap(adapter->hw.hw_addr);err_ioremap:free_netdev(netdev);err_alloc_etherdev:pci_release_regions(pdev);err_pci_reg:err_dma:pci_disable_device(pdev);return err;} |
1 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed){std::string variable_6895314043317600343 ("BganYi");
struct hstate *h = hstate_inode(inode);long chg = region_truncate(&inode->i_mapping->private_list, offset);spin_lock(&inode->i_lock);inode->i_blocks -= (blocks_per_huge_page(h) * freed);spin_unlock(&inode->i_lock); hugetlb_put_quota(inode->i_mapping, (chg - freed));hugetlb_acct_memory(h, -(chg - freed));} |
0 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/cryptauth/device_capability_manager_impl.h"
#include <stddef.h>
#include <chrono>
#include <memory>
#include <thread>
#include <unordered_set>
#include <utility>
#include "base/bind.h"
#include "base/logging.h"
#include "base/macros.h"
#include "components/cryptauth/mock_cryptauth_client.h"
#include "components/cryptauth/remote_device.h"
#include "components/cryptauth/remote_device_test_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using testing::_;
using testing::Invoke;
namespace cryptauth {
namespace {
const char kSuccessResult[] = "success";
const char kErrorOnToggleEasyUnlockResult[] = "toggleEasyUnlockError";
const char kErrorFindEligibleUnlockDevices[] = "findEligibleUnlockDeviceError";
const char kErrorFindEligibleForPromotion[] = "findEligibleForPromotionError";
const std::string kTestErrorCodeTogglingIneligibleDevice =
"togglingIneligibleDevice.";
std::vector<cryptauth::ExternalDeviceInfo>
CreateExternalDeviceInfosForRemoteDevices(
const std::vector<cryptauth::RemoteDevice> remote_devices) {
std::vector<cryptauth::ExternalDeviceInfo> device_infos;
for (const auto& remote_device : remote_devices) {
// Add an ExternalDeviceInfo with the same public key as the RemoteDevice.
cryptauth::ExternalDeviceInfo info;
info.set_public_key(remote_device.public_key);
device_infos.push_back(info);
}
return device_infos;
}
} // namespace
class DeviceCapabilityManagerImplTest
: public testing::Test,
public MockCryptAuthClientFactory::Observer {
public:
DeviceCapabilityManagerImplTest()
: all_test_external_device_infos_(
CreateExternalDeviceInfosForRemoteDevices(
cryptauth::GenerateTestRemoteDevices(5))),
test_eligible_external_devices_infos_(
{all_test_external_device_infos_[0],
all_test_external_device_infos_[1],
all_test_external_device_infos_[2]}),
test_ineligible_external_devices_infos_(
{all_test_external_device_infos_[3],
all_test_external_device_infos_[4]}) {}
void SetUp() override {
mock_cryptauth_client_factory_ =
std::make_unique<MockCryptAuthClientFactory>(
MockCryptAuthClientFactory::MockType::MAKE_NICE_MOCKS);
mock_cryptauth_client_factory_->AddObserver(this);
device_capability_manager_ = std::make_unique<DeviceCapabilityManagerImpl>(
mock_cryptauth_client_factory_.get());
}
void OnCryptAuthClientCreated(MockCryptAuthClient* client) override {
ON_CALL(*client, ToggleEasyUnlock(_, _, _))
.WillByDefault(Invoke(
this, &DeviceCapabilityManagerImplTest::MockToggleEasyUnlock));
ON_CALL(*client, FindEligibleUnlockDevices(_, _, _))
.WillByDefault(Invoke(
this,
&DeviceCapabilityManagerImplTest::MockFindEligibleUnlockDevices));
ON_CALL(*client, FindEligibleForPromotion(_, _, _))
.WillByDefault(Invoke(
this,
&DeviceCapabilityManagerImplTest::MockFindEligibleForPromotion));
}
// Mock CryptAuthClient::ToggleEasyUnlock() implementation.
void MockToggleEasyUnlock(
const ToggleEasyUnlockRequest& request,
const CryptAuthClient::ToggleEasyUnlockCallback& callback,
const CryptAuthClient::ErrorCallback& error_callback) {
toggle_easy_unlock_callback_ = callback;
error_callback_ = error_callback;
error_code_ = kErrorOnToggleEasyUnlockResult;
}
// Mock CryptAuthClient::FindEligibleUnlockDevices() implementation.
void MockFindEligibleUnlockDevices(
const FindEligibleUnlockDevicesRequest& request,
const CryptAuthClient::FindEligibleUnlockDevicesCallback& callback,
const CryptAuthClient::ErrorCallback& error_callback) {
find_eligible_unlock_devices_callback_ = callback;
error_callback_ = error_callback;
error_code_ = kErrorFindEligibleUnlockDevices;
}
// Mock CryptAuthClient::FindEligibleForPromotion() implementation.
void MockFindEligibleForPromotion(
const FindEligibleForPromotionRequest& request,
const CryptAuthClient::FindEligibleForPromotionCallback& callback,
const CryptAuthClient::ErrorCallback& error_callback) {
find_eligible_for_promotion_callback_ = callback;
error_callback_ = error_callback;
error_code_ = kErrorFindEligibleForPromotion;
}
FindEligibleUnlockDevicesResponse CreateFindEligibleUnlockDevicesResponse() {
FindEligibleUnlockDevicesResponse find_eligible_unlock_devices_response;
for (const auto& device_info : test_eligible_external_devices_infos_) {
find_eligible_unlock_devices_response.add_eligible_devices()->CopyFrom(
device_info);
}
for (const auto& device_info : test_ineligible_external_devices_infos_) {
find_eligible_unlock_devices_response.add_ineligible_devices()
->mutable_device()
->CopyFrom(device_info);
}
return find_eligible_unlock_devices_response;
}
void VerifyDeviceEligibility() {
// Ensure that resulting devices are not empty. Otherwise, following for
// loop checks will succeed on empty resulting devices.
EXPECT_TRUE(result_eligible_devices_.size() > 0);
EXPECT_TRUE(result_ineligible_devices_.size() > 0);
for (const auto& device_info : result_eligible_devices_) {
EXPECT_TRUE(
std::find_if(
test_eligible_external_devices_infos_.begin(),
test_eligible_external_devices_infos_.end(),
[&device_info](const cryptauth::ExternalDeviceInfo& device) {
return device.public_key() == device_info.public_key();
}) != test_eligible_external_devices_infos_.end());
}
for (const auto& ineligible_device : result_ineligible_devices_) {
EXPECT_TRUE(
std::find_if(test_ineligible_external_devices_infos_.begin(),
test_ineligible_external_devices_infos_.end(),
[&ineligible_device](
const cryptauth::ExternalDeviceInfo& device) {
return device.public_key() ==
ineligible_device.device().public_key();
}) != test_ineligible_external_devices_infos_.end());
}
result_eligible_devices_.clear();
result_ineligible_devices_.clear();
}
void SetCapabilityEnabled(DeviceCapabilityManagerImpl::Capability capability,
const ExternalDeviceInfo& device_info,
bool enable) {
device_capability_manager_->SetCapabilityEnabled(
device_info.public_key(), capability, enable,
base::Bind(&DeviceCapabilityManagerImplTest::
TestSuccessSetCapabilityKeyUnlockCallback,
base::Unretained(this)),
base::Bind(&DeviceCapabilityManagerImplTest::TestErrorCallback,
base::Unretained(this)));
}
void FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability capability) {
device_capability_manager_->FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
base::Bind(&DeviceCapabilityManagerImplTest::
TestSuccessFindEligibleUnlockDevicesCallback,
base::Unretained(this)),
base::Bind(&DeviceCapabilityManagerImplTest::TestErrorCallback,
base::Unretained(this)));
}
void IsPromotableDevice(DeviceCapabilityManagerImpl::Capability capability,
const std::string& public_key) {
device_capability_manager_->IsCapabilityPromotable(
public_key, capability,
base::Bind(&DeviceCapabilityManagerImplTest::
TestSuccessFindEligibleForPromotionDeviceCallback,
base::Unretained(this)),
base::Bind(&DeviceCapabilityManagerImplTest::TestErrorCallback,
base::Unretained(this)));
}
void TestSuccessSetCapabilityKeyUnlockCallback() { result_ = kSuccessResult; }
void TestSuccessFindEligibleUnlockDevicesCallback(
const std::vector<ExternalDeviceInfo>& eligible_devices,
const std::vector<IneligibleDevice>& ineligible_devices) {
result_ = kSuccessResult;
result_eligible_devices_ = eligible_devices;
result_ineligible_devices_ = ineligible_devices;
}
void TestSuccessFindEligibleForPromotionDeviceCallback(bool eligible) {
result_ = kSuccessResult;
result_eligible_for_promotion_ = eligible;
}
void TestErrorCallback(const std::string& error_message) {
result_ = error_message;
}
void InvokeSetCapabilityKeyUnlockCallback() {
CryptAuthClient::ToggleEasyUnlockCallback success_callback =
toggle_easy_unlock_callback_;
ASSERT_TRUE(!success_callback.is_null());
toggle_easy_unlock_callback_.Reset();
success_callback.Run(ToggleEasyUnlockResponse());
}
void InvokeFindEligibleUnlockDevicesCallback(
const FindEligibleUnlockDevicesResponse& retrieved_devices_response) {
CryptAuthClient::FindEligibleUnlockDevicesCallback success_callback =
find_eligible_unlock_devices_callback_;
ASSERT_TRUE(!success_callback.is_null());
find_eligible_unlock_devices_callback_.Reset();
success_callback.Run(retrieved_devices_response);
}
void InvokeFindEligibleForPromotionCallback(bool eligible_for_promotion) {
FindEligibleForPromotionResponse response;
response.set_may_show_promo(eligible_for_promotion);
CryptAuthClient::FindEligibleForPromotionCallback success_callback =
find_eligible_for_promotion_callback_;
ASSERT_TRUE(!success_callback.is_null());
find_eligible_for_promotion_callback_.Reset();
success_callback.Run(response);
}
void InvokeErrorCallback() {
CryptAuthClient::ErrorCallback error_callback = error_callback_;
ASSERT_TRUE(!error_callback.is_null());
error_callback_.Reset();
error_callback.Run(error_code_);
}
std::string GetResultAndReset() {
std::string result;
result.swap(result_);
return result;
}
bool GetEligibleForPromotionAndReset() {
bool result_eligible_for_promotion = result_eligible_for_promotion_;
result_eligible_for_promotion_ = false;
return result_eligible_for_promotion;
}
const std::vector<cryptauth::ExternalDeviceInfo>
all_test_external_device_infos_;
const std::vector<ExternalDeviceInfo> test_eligible_external_devices_infos_;
const std::vector<ExternalDeviceInfo> test_ineligible_external_devices_infos_;
std::unique_ptr<MockCryptAuthClientFactory> mock_cryptauth_client_factory_;
std::unique_ptr<cryptauth::DeviceCapabilityManagerImpl>
device_capability_manager_;
CryptAuthClient::ToggleEasyUnlockCallback toggle_easy_unlock_callback_;
CryptAuthClient::FindEligibleUnlockDevicesCallback
find_eligible_unlock_devices_callback_;
CryptAuthClient::FindEligibleForPromotionCallback
find_eligible_for_promotion_callback_;
CryptAuthClient::ErrorCallback error_callback_;
// Used by all tests that involve CryptauthClient network calls internally.
// Network call statuses are mocked out in place by |result_| and
// |error_code_| to keep track of the order in which DevicaCapabilityManager
// functions are called.
std::string result_;
std::string error_code_;
// For FindEligibleUnlockDevice tests.
std::vector<ExternalDeviceInfo> result_eligible_devices_;
std::vector<IneligibleDevice> result_ineligible_devices_;
// For FindEligibleForPromotion tests.
bool result_eligible_for_promotion_;
private:
DISALLOW_COPY_AND_ASSIGN(DeviceCapabilityManagerImplTest);
};
TEST_F(DeviceCapabilityManagerImplTest, TestOrderUponMultipleRequests) {
SetCapabilityEnabled(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[0], true /* enable */);
FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY);
IsPromotableDevice(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[0].public_key());
SetCapabilityEnabled(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[1], true /* enable */);
FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY);
IsPromotableDevice(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[1].public_key());
InvokeSetCapabilityKeyUnlockCallback();
EXPECT_EQ(kSuccessResult, GetResultAndReset());
InvokeFindEligibleUnlockDevicesCallback(
CreateFindEligibleUnlockDevicesResponse());
EXPECT_EQ(kSuccessResult, GetResultAndReset());
VerifyDeviceEligibility();
InvokeFindEligibleForPromotionCallback(true /* eligible */);
EXPECT_EQ(kSuccessResult, GetResultAndReset());
EXPECT_TRUE(GetEligibleForPromotionAndReset());
InvokeSetCapabilityKeyUnlockCallback();
EXPECT_EQ(kSuccessResult, GetResultAndReset());
InvokeFindEligibleUnlockDevicesCallback(
CreateFindEligibleUnlockDevicesResponse());
EXPECT_EQ(kSuccessResult, GetResultAndReset());
VerifyDeviceEligibility();
InvokeFindEligibleForPromotionCallback(true /* eligible */);
EXPECT_EQ(kSuccessResult, GetResultAndReset());
EXPECT_TRUE(GetEligibleForPromotionAndReset());
}
TEST_F(DeviceCapabilityManagerImplTest, TestMultipleSetUnlocksRequests) {
SetCapabilityEnabled(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[0], true /* enable */);
SetCapabilityEnabled(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[1], true /* enable */);
SetCapabilityEnabled(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[2], true /* enable */);
InvokeErrorCallback();
EXPECT_EQ(kErrorOnToggleEasyUnlockResult, GetResultAndReset());
InvokeSetCapabilityKeyUnlockCallback();
EXPECT_EQ(kSuccessResult, GetResultAndReset());
InvokeSetCapabilityKeyUnlockCallback();
EXPECT_EQ(kSuccessResult, GetResultAndReset());
}
TEST_F(DeviceCapabilityManagerImplTest,
TestMultipleFindEligibleForUnlockDevicesRequests) {
FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY);
FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY);
FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY);
InvokeFindEligibleUnlockDevicesCallback(
CreateFindEligibleUnlockDevicesResponse());
EXPECT_EQ(kSuccessResult, GetResultAndReset());
VerifyDeviceEligibility();
InvokeErrorCallback();
EXPECT_EQ(kErrorFindEligibleUnlockDevices, GetResultAndReset());
InvokeFindEligibleUnlockDevicesCallback(
CreateFindEligibleUnlockDevicesResponse());
EXPECT_EQ(kSuccessResult, GetResultAndReset());
VerifyDeviceEligibility();
}
TEST_F(DeviceCapabilityManagerImplTest, TestMultipleIsPromotableRequests) {
IsPromotableDevice(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[0].public_key());
IsPromotableDevice(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[1].public_key());
IsPromotableDevice(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[2].public_key());
InvokeFindEligibleForPromotionCallback(true /*eligible*/);
EXPECT_EQ(kSuccessResult, GetResultAndReset());
EXPECT_TRUE(GetEligibleForPromotionAndReset());
InvokeFindEligibleForPromotionCallback(true /*eligible*/);
EXPECT_EQ(kSuccessResult, GetResultAndReset());
EXPECT_TRUE(GetEligibleForPromotionAndReset());
InvokeErrorCallback();
EXPECT_EQ(kErrorFindEligibleForPromotion, GetResultAndReset());
}
TEST_F(DeviceCapabilityManagerImplTest, TestOrderViaMultipleErrors) {
SetCapabilityEnabled(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[0], true /* enable */);
FindEligibleDevicesForCapability(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY);
IsPromotableDevice(
DeviceCapabilityManagerImpl::Capability::CAPABILITY_UNLOCK_KEY,
test_eligible_external_devices_infos_[0].public_key());
InvokeErrorCallback();
EXPECT_EQ(kErrorOnToggleEasyUnlockResult, GetResultAndReset());
InvokeErrorCallback();
EXPECT_EQ(kErrorFindEligibleUnlockDevices, GetResultAndReset());
InvokeErrorCallback();
EXPECT_EQ(kErrorFindEligibleForPromotion, GetResultAndReset());
}
} // namespace cryptauth
|